Dataset schema (one record per source file; ⌀ marks columns that may be null):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 247 |
| max_stars_repo_name | string | lengths 4 to 125 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 368k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, ⌀ |
| max_issues_repo_path | string | lengths 4 to 247 |
| max_issues_repo_name | string | lengths 4 to 125 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, ⌀ |
| max_forks_repo_path | string | lengths 4 to 247 |
| max_forks_repo_name | string | lengths 4 to 125 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, ⌀ |
| content | string | lengths 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | lengths 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
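For orientation, here is a minimal sketch of how rows with this schema could be inspected. It assumes the records below have been exported to a JSON Lines file named `rows.jsonl`; the filename and the pandas-based approach are illustrative assumptions, not part of the dataset itself.

```python
import pandas as pd

# Assumption: the records below have been exported as JSON Lines, one object per row.
df = pd.read_json("rows.jsonl", lines=True)

# The filtered:* columns record per-filter counts; content appears to be the
# filtered text and original_content the unfiltered text, so their length
# difference gives a rough measure of how much each file was trimmed.
df["chars_removed"] = df["original_content"].str.len() - df["content"].str.len()

print(df[[
    "hexsha",
    "size",
    "avg_line_length",
    "alphanum_fraction",
    "filtered:remove_function_no_docstring",
    "filtered:remove_class_no_docstring",
    "filtered:remove_delete_markers",
    "chars_removed",
]].head())
```

The individual records follow, one block per file, with the `content` and `original_content` fields reproduced verbatim.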
hexsha: c0b3a1dd34d4af3e41ec71b11a5b0c0289b6b2cb | size: 1,527 | ext: py | lang: Python
max_stars_repo_path: partname_resolver/components/test_inductor.py | max_stars_repo_name: sakoPO/partname-resolver | max_stars_repo_head_hexsha: ad881eb147b005f0e833a1c78fa9fc4b8b7a33bb
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: partname_resolver/components/test_inductor.py | max_issues_repo_name: sakoPO/partname-resolver | max_issues_repo_head_hexsha: ad881eb147b005f0e833a1c78fa9fc4b8b7a33bb
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: partname_resolver/components/test_inductor.py | max_forks_repo_name: sakoPO/partname-resolver | max_forks_repo_head_hexsha: ad881eb147b005f0e833a1c78fa9fc4b8b7a33bb
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import unittest
from partname_resolver.components.inductor import Inductor
from partname_resolver.units.temperature import TemperatureRange
avg_line_length: 41.27027 | max_line_length: 78 | alphanum_fraction: 0.513425
original_content:
import unittest
from partname_resolver.components.inductor import Inductor
from partname_resolver.units.temperature import TemperatureRange
class InductorTestCase(unittest.TestCase):
def test_equality(self):
A = Inductor(inductor_type=Inductor.Type.MultilayerInductor,
manufacturer="Murata Manufacturing",
partnumber="LQG18HNR10J00D",
working_temperature_range=TemperatureRange('-55', '125'),
series="LQG",
inductance='100nH',
tolerance=None,
q='8',
dc_resistance=None,
rated_current=None,
self_resonant_frequency=None,
max_working_voltage=None,
case=None,
note=None)
B = Inductor(inductor_type=Inductor.Type.MultilayerInductor,
manufacturer="Murata Manufacturing",
partnumber="LQG18HNR10J00D",
working_temperature_range=TemperatureRange('-55', '125'),
series="LQG",
inductance='100nH',
tolerance=None,
q='8',
dc_resistance=None,
rated_current=None,
self_resonant_frequency=None,
max_working_voltage=None,
case=None,
note=None)
self.assertEqual(B, A)
filtered:remove_function_no_docstring: 1,316 | filtered:remove_class_no_docstring: 21 | filtered:remove_delete_markers: 49
hexsha: 05d44356b2346be5b170ee2ffa3cfc38f895d3ff | size: 3,433 | ext: py | lang: Python
max_stars_repo_path: setup.py | max_stars_repo_name: oarepo/cesnet-openid-remote | max_stars_repo_head_hexsha: 4ca46fc94801e51267b7676e0c212a024e3af3a1
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: setup.py | max_issues_repo_name: oarepo/cesnet-openid-remote | max_issues_repo_head_hexsha: 4ca46fc94801e51267b7676e0c212a024e3af3a1
max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2021-02-19T10:53:28.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-09T17:15:56.000Z
max_forks_repo_path: setup.py | max_forks_repo_name: oarepo/cesnet-openid-remote | max_forks_repo_head_hexsha: 4ca46fc94801e51267b7676e0c212a024e3af3a1
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CESNET.
#
# CESNET-OpenID-Remote is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""CESNET OIDC Auth backend for OARepo"""
import os
from setuptools import find_packages, setup
readme = open('README.md').read()
history = open('CHANGES.rst').read()
OAREPO_VERSION = os.environ.get('OAREPO_VERSION', '3.3.0')
tests_require = [
'pydocstyle',
'isort',
'oarepo-communities>=1.1.0',
'invenio-oauthclient==1.4.0'
]
extras_require = {
'tests': [
'oarepo[tests]~={version}'.format(version=OAREPO_VERSION),
*tests_require
]
}
extras_require['all'] = []
for reqs in extras_require.values():
extras_require['all'].extend(reqs)
setup_requires = [
]
install_requires = [
'urnparse>=0.2.0',
'invenio-openid-connect>=2.1.0',
]
packages = find_packages(exclude=['examples', 'tests'])
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('cesnet_openid_remote', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='cesnet-openid-remote',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
keywords='invenio oarepo oauth openidc auth groups',
license='MIT',
author='Miroslav Bauer',
author_email='bauer@cesnet.cz',
url='https://github.com/oarepo/cesnet-openid-remote',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'flask.commands': [
'cesnet:group = cesnet_openid_remote.cli:cesnet_group',
],
'invenio_base.apps': [
'cesnet_openid_remote = cesnet_openid_remote:CESNETOpenIDRemote',
],
# TODO: Edit these entry points to fit your needs.
# 'invenio_access.actions': [],
# 'invenio_admin.actions': [],
# 'invenio_assets.bundles': [],
'invenio_base.api_apps': [
'cesnet_openid_remote = cesnet_openid_remote:CESNETOpenIDRemote',
],
# 'invenio_base.api_blueprints': [],
# 'invenio_base.blueprints': [],
# 'invenio_celery.tasks': [],
'invenio_db.models': [
'cesnet_openid_remote = cesnet_openid_remote.models',
],
'invenio_db.alembic': [
'cesnet_openid_remote = cesnet_openid_remote:alembic',
],
# 'invenio_pidstore.minters': [],
# 'invenio_records.jsonresolver': [],
},
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Development Status :: 1 - Planning',
],
)
avg_line_length: 29.594828 | max_line_length: 77 | alphanum_fraction: 0.630061
original_content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CESNET.
#
# CESNET-OpenID-Remote is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""CESNET OIDC Auth backend for OARepo"""
import os
from setuptools import find_packages, setup
readme = open('README.md').read()
history = open('CHANGES.rst').read()
OAREPO_VERSION = os.environ.get('OAREPO_VERSION', '3.3.0')
tests_require = [
'pydocstyle',
'isort',
'oarepo-communities>=1.1.0',
'invenio-oauthclient==1.4.0'
]
extras_require = {
'tests': [
'oarepo[tests]~={version}'.format(version=OAREPO_VERSION),
*tests_require
]
}
extras_require['all'] = []
for reqs in extras_require.values():
extras_require['all'].extend(reqs)
setup_requires = [
]
install_requires = [
'urnparse>=0.2.0',
'invenio-openid-connect>=2.1.0',
]
packages = find_packages(exclude=['examples', 'tests'])
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('cesnet_openid_remote', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='cesnet-openid-remote',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
keywords='invenio oarepo oauth openidc auth groups',
license='MIT',
author='Miroslav Bauer',
author_email='bauer@cesnet.cz',
url='https://github.com/oarepo/cesnet-openid-remote',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'flask.commands': [
'cesnet:group = cesnet_openid_remote.cli:cesnet_group',
],
'invenio_base.apps': [
'cesnet_openid_remote = cesnet_openid_remote:CESNETOpenIDRemote',
],
# TODO: Edit these entry points to fit your needs.
# 'invenio_access.actions': [],
# 'invenio_admin.actions': [],
# 'invenio_assets.bundles': [],
'invenio_base.api_apps': [
'cesnet_openid_remote = cesnet_openid_remote:CESNETOpenIDRemote',
],
# 'invenio_base.api_blueprints': [],
# 'invenio_base.blueprints': [],
# 'invenio_celery.tasks': [],
'invenio_db.models': [
'cesnet_openid_remote = cesnet_openid_remote.models',
],
'invenio_db.alembic': [
'cesnet_openid_remote = cesnet_openid_remote:alembic',
],
# 'invenio_pidstore.minters': [],
# 'invenio_records.jsonresolver': [],
},
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Development Status :: 1 - Planning',
],
)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 0098e30c2ef9ea5e88b69f0c7ac211522ab302b6 | size: 476 | ext: py | lang: Python
max_stars_repo_path: src/pos/utils.py | max_stars_repo_name: ExplorerFreda/sub2-augmentation | max_stars_repo_head_hexsha: 3f43e72a1b4eb5201472938ede8ea0fbe97be7c3
max_stars_repo_licenses: ["MIT"] | max_stars_count: 6 | max_stars_repo_stars_event_min_datetime: 2021-07-14T22:49:32.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-22T14:32:17.000Z
max_issues_repo_path: src/pos/utils.py | max_issues_repo_name: ExplorerFreda/sub2-augmentation | max_issues_repo_head_hexsha: 3f43e72a1b4eb5201472938ede8ea0fbe97be7c3
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/pos/utils.py | max_forks_repo_name: ExplorerFreda/sub2-augmentation | max_forks_repo_head_hexsha: 3f43e72a1b4eb5201472938ede8ea0fbe97be7c3
max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-07-15T03:19:39.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-15T03:19:39.000Z
content:
high_resource_language_list = [
'bg', 'cs', 'da', 'de', 'en', 'es', 'eu', 'fa', 'fi', 'fr', 'he', 'hi',
'hr', 'id', 'it', 'nl', 'no', 'pl', 'pt', 'sl', 'sv'
]
low_resource_language_list = [
'el', 'et', 'ga', 'hu', 'ro', 'ta'
]
extra_language_list_ud12 = [
'ar', 'cu', 'fi_ftb', 'got', 'grc', 'grc_proiel',
'la', 'la_itt', 'la_proiel'
]
extra_low_resource_language_list_ud26 = [
'be_hse', 'cop_scriptorium', 'lt_hse', 'mr_ufal', 'ta_ttb', 'te_mtg'
]
avg_line_length: 31.733333 | max_line_length: 76 | alphanum_fraction: 0.539916
original_content:
high_resource_language_list = [
'bg', 'cs', 'da', 'de', 'en', 'es', 'eu', 'fa', 'fi', 'fr', 'he', 'hi',
'hr', 'id', 'it', 'nl', 'no', 'pl', 'pt', 'sl', 'sv'
]
low_resource_language_list = [
'el', 'et', 'ga', 'hu', 'ro', 'ta'
]
extra_language_list_ud12 = [
'ar', 'cu', 'fi_ftb', 'got', 'grc', 'grc_proiel',
'la', 'la_itt', 'la_proiel'
]
extra_low_resource_language_list_ud26 = [
'be_hse', 'cop_scriptorium', 'lt_hse', 'mr_ufal', 'ta_ttb', 'te_mtg'
]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 6a05f2d56018956a4ac6c006679c41bfd597e952 | size: 7,071 | ext: py | lang: Python
max_stars_repo_path: cirq/ion/ion_device.py | max_stars_repo_name: joshp112358/Cirq | max_stars_repo_head_hexsha: c4fac27a9849e589ee05b4f702f2d7c9049aaeea
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 15 | max_stars_repo_stars_event_min_datetime: 2020-06-29T08:33:39.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-12T00:28:51.000Z
max_issues_repo_path: cirq/ion/ion_device.py | max_issues_repo_name: joshp112358/Cirq | max_issues_repo_head_hexsha: c4fac27a9849e589ee05b4f702f2d7c9049aaeea
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2020-11-27T09:34:13.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-30T21:13:41.000Z
max_forks_repo_path: cirq/ion/ion_device.py | max_forks_repo_name: joshp112358/Cirq | max_forks_repo_head_hexsha: c4fac27a9849e589ee05b4f702f2d7c9049aaeea
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 11 | max_forks_repo_forks_event_min_datetime: 2020-06-29T08:40:24.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-24T17:39:16.000Z
content:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast, Iterable, Optional, Set, TYPE_CHECKING, FrozenSet
from cirq import circuits, value, devices, ops, protocols
from cirq.ion import convert_to_ion_gates
if TYPE_CHECKING:
import cirq
@value.value_equality
class IonDevice(devices.Device):
"""A device with qubits placed on a line.
Qubits have all-to-all connectivity.
"""
def __init__(self, measurement_duration: 'cirq.DURATION_LIKE',
twoq_gates_duration: 'cirq.DURATION_LIKE',
oneq_gates_duration: 'cirq.DURATION_LIKE',
qubits: Iterable[devices.LineQubit]) -> None:
"""Initializes the description of an ion trap device.
Args:
measurement_duration: The maximum duration of a measurement.
twoq_gates_duration: The maximum duration of a two qubit operation.
oneq_gates_duration: The maximum duration of a single qubit
operation.
qubits: Qubits on the device, identified by their x, y location.
"""
self._measurement_duration = value.Duration(measurement_duration)
self._twoq_gates_duration = value.Duration(twoq_gates_duration)
self._oneq_gates_duration = value.Duration(oneq_gates_duration)
self.qubits = frozenset(qubits)
def at(self, position: int) -> Optional[devices.LineQubit]:
"""Returns the qubit at the given position, if there is one, else None.
"""
q = devices.LineQubit(position)
return q if q in self.qubits else None
def neighbors_of(self, qubit: devices.LineQubit):
"""Returns the qubits that the given qubit can interact with."""
possibles = [
devices.LineQubit(qubit.x + 1),
devices.LineQubit(qubit.x - 1),
]
return [e for e in possibles if e in self.qubits]
avg_line_length: 39.949153 | max_line_length: 80 | alphanum_fraction: 0.623391
original_content:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast, Iterable, Optional, Set, TYPE_CHECKING, FrozenSet
from cirq import circuits, value, devices, ops, protocols
from cirq.ion import convert_to_ion_gates
if TYPE_CHECKING:
import cirq
@value.value_equality
class IonDevice(devices.Device):
"""A device with qubits placed on a line.
Qubits have all-to-all connectivity.
"""
def __init__(self, measurement_duration: 'cirq.DURATION_LIKE',
twoq_gates_duration: 'cirq.DURATION_LIKE',
oneq_gates_duration: 'cirq.DURATION_LIKE',
qubits: Iterable[devices.LineQubit]) -> None:
"""Initializes the description of an ion trap device.
Args:
measurement_duration: The maximum duration of a measurement.
twoq_gates_duration: The maximum duration of a two qubit operation.
oneq_gates_duration: The maximum duration of a single qubit
operation.
qubits: Qubits on the device, identified by their x, y location.
"""
self._measurement_duration = value.Duration(measurement_duration)
self._twoq_gates_duration = value.Duration(twoq_gates_duration)
self._oneq_gates_duration = value.Duration(oneq_gates_duration)
self.qubits = frozenset(qubits)
def qubit_set(self) -> FrozenSet['cirq.LineQubit']:
return self.qubits
def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:
return convert_to_ion_gates.ConvertToIonGates().convert_one(operation)
def decompose_circuit(self, circuit: circuits.Circuit) -> circuits.Circuit:
return convert_to_ion_gates.ConvertToIonGates().convert_circuit(circuit)
def duration_of(self, operation):
if isinstance(operation.gate, ops.XXPowGate):
return self._twoq_gates_duration
if isinstance(
operation.gate,
(ops.XPowGate, ops.YPowGate, ops.ZPowGate, ops.PhasedXPowGate)):
return self._oneq_gates_duration
if isinstance(operation.gate, ops.MeasurementGate):
return self._measurement_duration
raise ValueError('Unsupported gate type: {!r}'.format(operation))
def validate_gate(self, gate: ops.Gate):
if not isinstance(
gate, (ops.XPowGate, ops.YPowGate, ops.ZPowGate,
ops.PhasedXPowGate, ops.XXPowGate, ops.MeasurementGate)):
raise ValueError('Unsupported gate type: {!r}'.format(gate))
def validate_operation(self, operation):
if not isinstance(operation, ops.GateOperation):
raise ValueError('Unsupported operation: {!r}'.format(operation))
self.validate_gate(operation.gate)
for q in operation.qubits:
if not isinstance(q, devices.LineQubit):
raise ValueError('Unsupported qubit type: {!r}'.format(q))
if q not in self.qubits:
raise ValueError('Qubit not on device: {!r}'.format(q))
def _check_if_XXPow_operation_interacts_with_any(
self,
XXPow_op: ops.GateOperation,
others: Iterable[ops.GateOperation]) -> bool:
return any(self._check_if_XXPow_operation_interacts(XXPow_op, op)
for op in others)
def _check_if_XXPow_operation_interacts(
self,
XXPow_op: ops.GateOperation,
other_op: ops.GateOperation) -> bool:
if isinstance(other_op.gate, (ops.XPowGate,
ops.YPowGate,
ops.PhasedXPowGate,
ops.MeasurementGate,
ops.ZPowGate)):
return False
return any(q == p
for q in XXPow_op.qubits
for p in other_op.qubits)
def validate_circuit(self, circuit: circuits.Circuit):
super().validate_circuit(circuit)
_verify_unique_measurement_keys(circuit.all_operations())
def can_add_operation_into_moment(self,
operation: ops.Operation,
moment: ops.Moment) -> bool:
if not super().can_add_operation_into_moment(operation, moment):
return False
if isinstance(operation.gate, ops.XXPowGate):
return not self._check_if_XXPow_operation_interacts_with_any(
cast(ops.GateOperation, operation),
cast(Iterable[ops.GateOperation], moment.operations))
return True
def at(self, position: int) -> Optional[devices.LineQubit]:
"""Returns the qubit at the given position, if there is one, else None.
"""
q = devices.LineQubit(position)
return q if q in self.qubits else None
def neighbors_of(self, qubit: devices.LineQubit):
"""Returns the qubits that the given qubit can interact with."""
possibles = [
devices.LineQubit(qubit.x + 1),
devices.LineQubit(qubit.x - 1),
]
return [e for e in possibles if e in self.qubits]
def __repr__(self):
return ('IonDevice(measurement_duration={!r}, '
'twoq_gates_duration={!r}, '
'oneq_gates_duration={!r} '
'qubits={!r})').format(self._measurement_duration,
self._twoq_gates_duration,
self._oneq_gates_duration,
sorted(self.qubits))
def __str__(self):
diagram = circuits.TextDiagramDrawer()
for q in self.qubits:
diagram.write(q.x, 0, str(q))
for q2 in self.neighbors_of(q):
diagram.grid_line(q.x, 0, q2.x, 0)
return diagram.render(
horizontal_spacing=3,
vertical_spacing=2,
use_unicode_characters=True)
def _value_equality_values_(self):
return (self._measurement_duration,
self._twoq_gates_duration,
self._oneq_gates_duration,
self.qubits)
def _verify_unique_measurement_keys(operations: Iterable[ops.Operation]):
seen: Set[str] = set()
for op in operations:
if isinstance(op.gate, ops.MeasurementGate):
meas = op.gate
key = protocols.measurement_key(meas)
if key in seen:
raise ValueError('Measurement key {} repeated'.format(key))
seen.add(key)
filtered:remove_function_no_docstring: 4,274 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 374
hexsha: 4ab3bb9ddd1da5119cd9c519636fbb494ba987ad | size: 10,910 | ext: py | lang: Python
max_stars_repo_path: examples/tts/ljspeech/local/prepare_data.py | max_stars_repo_name: wgfi110/athena | max_stars_repo_head_hexsha: e704884ec6a3a947769d892aa267578038e49ecb
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 791 | max_stars_repo_stars_event_min_datetime: 2019-12-22T03:09:04.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-26T01:57:42.000Z
max_issues_repo_path: examples/tts/ljspeech/local/prepare_data.py | max_issues_repo_name: wgfi110/athena | max_issues_repo_head_hexsha: e704884ec6a3a947769d892aa267578038e49ecb
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 198 | max_issues_repo_issues_event_min_datetime: 2019-12-22T03:06:27.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-29T02:57:59.000Z
max_forks_repo_path: examples/tts/ljspeech/local/prepare_data.py | max_forks_repo_name: wgfi110/athena | max_forks_repo_head_hexsha: e704884ec6a3a947769d892aa267578038e49ecb
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 194 | max_forks_repo_forks_event_min_datetime: 2019-12-24T03:59:29.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-25T02:44:51.000Z
content:
#coding=utf-8
# Copyright (C) 2020 ATHENA AUTHORS; LanYu;
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" LJspeech dataset
This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker
reading passages from 7 non-fiction books. A transcription is provided for each clip.
Clips vary in length from 1 to 10 seconds and have a total length of approximately 24 hours.
detailed information can be seen on https://keithito.com/LJ-Speech-Dataset
"""
import os
import re
import sys
import tarfile
import inflect
import urllib
import tempfile
import codecs
import pandas
from absl import logging
from sklearn.model_selection import train_test_split
from unidecode import unidecode
import tensorflow as tf
from athena import get_wave_file_length
GFILE = tf.compat.v1.gfile
URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
#------------normalize_numbers--------------#
_INFLECT = inflect.engine()
_COMMA_NUMBER_RE = re.compile(r'([0-9][0-9\,]+[0-9])')
_DECIMAL_NUMBER_RE = re.compile(r'([0-9]+\.[0-9]+)')
_POUNDS_RE = re.compile(r'£([0-9\,]*[0-9]+)')
_DOLLARS_RE = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ORDINAL_RE = re.compile(r'[0-9]+(st|nd|rd|th)')
_NUMBER_RE = re.compile(r'[0-9]+')
def normalize_numbers(text):
"""
normalize numbers in text
"""
text = re.sub(_COMMA_NUMBER_RE, _remove_commas, text)
text = re.sub(_POUNDS_RE, r'\1 pounds', text)
text = re.sub(_DOLLARS_RE, _expand_dollars, text)
text = re.sub(_DECIMAL_NUMBER_RE, _expand_decimal_point, text)
text = re.sub(_ORDINAL_RE, _expand_ordinal, text)
text = re.sub(_NUMBER_RE, _expand_number, text)
return text
#---------------clean_text---------------#
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('Mrs', 'Misess'),
('Mr', 'Mister'),
('Dr', 'Doctor'),
('St', 'Saint'),
('Co', 'Company'),
('Jr', 'Junior'),
('Maj', 'Major'),
('Gen', 'General'),
('Drs', 'Doctors'),
('Rev', 'Reverend'),
('Lt', 'Lieutenant'),
('Hon', 'Honorable'),
('Sgt', 'Sergeant'),
('Capt', 'Captain'),
('Esq', 'Esquire'),
('Ltd', 'Limited'),
('Col', 'Colonel'),
('Ft', 'Fort'),
]]
def expand_abbreviations(text):
"""
expand abbreviations in text
"""
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(text):
"""
collapse whitespace in text
"""
return re.sub(_whitespace_re, ' ', text)
# NOTE (kan-bayashi): Following functions additionally defined, not inclueded in original codes.
def remove_unnecessary_symbols(text):
"""
remove unnecessary symbols in text
"""
text = re.sub(r'[\(\)\[\]\<\>\"]+', '', text)
return text
def expand_symbols(text):
"""
expand symbols in text
"""
text = re.sub("\;", ",", text)
text = re.sub("\:", ",", text)
text = re.sub("\-", " ", text)
text = re.sub("\&", "and", text)
return text
def preprocess(text):
'''Custom pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = normalize_numbers(text)
text = expand_abbreviations(text)
text = expand_symbols(text)
text = remove_unnecessary_symbols(text)
text = collapse_whitespace(text)
return text
def download_and_extract(directory, url):
"""Download and extract the given split of dataset.
Args:
directory: the directory where to extract the tarball.
url: the url to download the data file.
"""
if not GFILE.Exists(directory):
GFILE.MakeDirs(directory)
_, tar_filepath = tempfile.mkstemp(suffix=".tar.bz2")
try:
logging.info("Downloading %s to %s" % (url, tar_filepath))
urllib.request.urlretrieve(url, tar_filepath, _progress)
statinfo = os.stat(tar_filepath)
logging.info(
"Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size)
)
with tarfile.open(tar_filepath, "r") as tar:
tar.extractall(directory)
logging.info("Successfully extracted data from LJSpeech-1.1.tar.bz2")
finally:
GFILE.Remove(tar_filepath)
#----------------create total.csv-----------------
def convert_audio_and_split_transcript(dataset_dir, total_csv_path):
"""Convert rar to WAV and split the transcript.
Args:
dataset_dir : the directory which holds the input dataset.
total_csv_path : the resulting output csv file.
LJSpeech-1.1 dir Tree structure:
LJSpeech-1.1
-metadata.csv
-LJ001-0002|in being comparatively modern.|in being comparatively modern.
...
-wavs
-LJ001-0001.wav
-LJ001-0002.wav
...
-LJ050-0278
-pcms
-audio-LJ001-0001.s16
-audio-LJ001-0002.s16
...
"""
logging.info("Processing audio and transcript for {}".format("all_files"))
wav_dir = os.path.join(dataset_dir, "LJSpeech-1.1/wavs/")
files = []
# ProsodyLabel ---word
with codecs.open(os.path.join(dataset_dir, "LJSpeech-1.1/metadata.csv"),
"r",
encoding="utf-8") as f:
for line in f:
wav_name = line.split('|')[0] + '.wav'
wav_file = os.path.join(wav_dir, wav_name)
wav_length = get_wave_file_length(wav_file)
#get transcript
content = line.split('|')[2]
clean_content = preprocess(content.rstrip())
transcript = ' '.join(list(clean_content))
transcript = transcript.replace(' ', ' <space>')
transcript = 'sp1 ' + transcript + ' sil' #' sil\n'
files.append((os.path.abspath(wav_file), wav_length, transcript))
# Write to txt file which contains three columns:
fp = open(total_csv_path, 'w', encoding="utf-8")
fp.write("wav_filename"+'\t'
"wav_length_ms"+'\t'
"transcript"+'\n')
for i in range(len(files)):
fp.write(str(files[i][0])+'\t')
fp.write(str(files[i][1])+'\t')
fp.write(str(files[i][2])+'\n')
fp.close()
logging.info("Successfully generated csv file {}".format(total_csv_path))
def processor(dircetory):
""" download and process """
#logging.info("Downloading the dataset may take a long time so you can download it in another way and move it to the dircetory {}".format(dircetory))
LJSpeech = os.path.join(dircetory, "LJSpeech-1.1.tar.bz2")
if os.path.exists(LJSpeech):
logging.info("{} already exist".format(LJSpeech))
else:
download_and_extract(dircetory, URL)
# get total_csv
logging.info("Processing the LJspeech total.csv in {}".format(dircetory))
total_csv_path = os.path.join(dircetory, "total.csv")
convert_audio_and_split_transcript(dircetory, total_csv_path)
split_train_dev_test(total_csv_path, dircetory)
logging.info("Finished processing LJspeech csv ")
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
DIR = sys.argv[1]
processor(DIR)
avg_line_length: 35.537459 | max_line_length: 153 | alphanum_fraction: 0.628873
original_content:
#coding=utf-8
# Copyright (C) 2020 ATHENA AUTHORS; LanYu;
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" LJspeech dataset
This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker
reading passages from 7 non-fiction books. A transcription is provided for each clip.
Clips vary in length from 1 to 10 seconds and have a total length of approximately 24 hours.
detailed information can be seen on https://keithito.com/LJ-Speech-Dataset
"""
import os
import re
import sys
import tarfile
import inflect
import urllib
import tempfile
import codecs
import pandas
from absl import logging
from sklearn.model_selection import train_test_split
from unidecode import unidecode
import tensorflow as tf
from athena import get_wave_file_length
GFILE = tf.compat.v1.gfile
URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
#------------normalize_numbers--------------#
_INFLECT = inflect.engine()
_COMMA_NUMBER_RE = re.compile(r'([0-9][0-9\,]+[0-9])')
_DECIMAL_NUMBER_RE = re.compile(r'([0-9]+\.[0-9]+)')
_POUNDS_RE = re.compile(r'£([0-9\,]*[0-9]+)')
_DOLLARS_RE = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ORDINAL_RE = re.compile(r'[0-9]+(st|nd|rd|th)')
_NUMBER_RE = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _INFLECT.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _INFLECT.number_to_words(num % 100)
elif num % 100 == 0:
return _INFLECT.number_to_words(num // 100) + ' hundred'
else:
return _INFLECT.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _INFLECT.number_to_words(num, andword='')
def normalize_numbers(text):
"""
normalize numbers in text
"""
text = re.sub(_COMMA_NUMBER_RE, _remove_commas, text)
text = re.sub(_POUNDS_RE, r'\1 pounds', text)
text = re.sub(_DOLLARS_RE, _expand_dollars, text)
text = re.sub(_DECIMAL_NUMBER_RE, _expand_decimal_point, text)
text = re.sub(_ORDINAL_RE, _expand_ordinal, text)
text = re.sub(_NUMBER_RE, _expand_number, text)
return text
#---------------clean_text---------------#
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('Mrs', 'Misess'),
('Mr', 'Mister'),
('Dr', 'Doctor'),
('St', 'Saint'),
('Co', 'Company'),
('Jr', 'Junior'),
('Maj', 'Major'),
('Gen', 'General'),
('Drs', 'Doctors'),
('Rev', 'Reverend'),
('Lt', 'Lieutenant'),
('Hon', 'Honorable'),
('Sgt', 'Sergeant'),
('Capt', 'Captain'),
('Esq', 'Esquire'),
('Ltd', 'Limited'),
('Col', 'Colonel'),
('Ft', 'Fort'),
]]
def expand_abbreviations(text):
"""
expand abbreviations in text
"""
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(text):
"""
collapse whitespace in text
"""
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
# NOTE (kan-bayashi): Following functions additionally defined, not inclueded in original codes.
def remove_unnecessary_symbols(text):
"""
remove unnecessary symbols in text
"""
text = re.sub(r'[\(\)\[\]\<\>\"]+', '', text)
return text
def expand_symbols(text):
"""
expand symbols in text
"""
text = re.sub("\;", ",", text)
text = re.sub("\:", ",", text)
text = re.sub("\-", " ", text)
text = re.sub("\&", "and", text)
return text
def preprocess(text):
'''Custom pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = normalize_numbers(text)
text = expand_abbreviations(text)
text = expand_symbols(text)
text = remove_unnecessary_symbols(text)
text = collapse_whitespace(text)
return text
def download_and_extract(directory, url):
"""Download and extract the given split of dataset.
Args:
directory: the directory where to extract the tarball.
url: the url to download the data file.
"""
if not GFILE.Exists(directory):
GFILE.MakeDirs(directory)
_, tar_filepath = tempfile.mkstemp(suffix=".tar.bz2")
try:
logging.info("Downloading %s to %s" % (url, tar_filepath))
def _progress(count, block_size, total_size):
sys.stdout.write(
"\r>> Downloading {} {:.1f}%".format(
tar_filepath, 100.0 * count * block_size / total_size
)
)
sys.stdout.flush()
urllib.request.urlretrieve(url, tar_filepath, _progress)
statinfo = os.stat(tar_filepath)
logging.info(
"Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size)
)
with tarfile.open(tar_filepath, "r") as tar:
tar.extractall(directory)
logging.info("Successfully extracted data from LJSpeech-1.1.tar.bz2")
finally:
GFILE.Remove(tar_filepath)
#----------------create total.csv-----------------
def convert_audio_and_split_transcript(dataset_dir, total_csv_path):
"""Convert rar to WAV and split the transcript.
Args:
dataset_dir : the directory which holds the input dataset.
total_csv_path : the resulting output csv file.
LJSpeech-1.1 dir Tree structure:
LJSpeech-1.1
-metadata.csv
-LJ001-0002|in being comparatively modern.|in being comparatively modern.
...
-wavs
-LJ001-0001.wav
-LJ001-0002.wav
...
-LJ050-0278
-pcms
-audio-LJ001-0001.s16
-audio-LJ001-0002.s16
...
"""
logging.info("Processing audio and transcript for {}".format("all_files"))
wav_dir = os.path.join(dataset_dir, "LJSpeech-1.1/wavs/")
files = []
# ProsodyLabel ---word
with codecs.open(os.path.join(dataset_dir, "LJSpeech-1.1/metadata.csv"),
"r",
encoding="utf-8") as f:
for line in f:
wav_name = line.split('|')[0] + '.wav'
wav_file = os.path.join(wav_dir, wav_name)
wav_length = get_wave_file_length(wav_file)
#get transcript
content = line.split('|')[2]
clean_content = preprocess(content.rstrip())
transcript = ' '.join(list(clean_content))
transcript = transcript.replace(' ', ' <space>')
transcript = 'sp1 ' + transcript + ' sil' #' sil\n'
files.append((os.path.abspath(wav_file), wav_length, transcript))
# Write to txt file which contains three columns:
fp = open(total_csv_path, 'w', encoding="utf-8")
fp.write("wav_filename"+'\t'
"wav_length_ms"+'\t'
"transcript"+'\n')
for i in range(len(files)):
fp.write(str(files[i][0])+'\t')
fp.write(str(files[i][1])+'\t')
fp.write(str(files[i][2])+'\n')
fp.close()
logging.info("Successfully generated csv file {}".format(total_csv_path))
def split_train_dev_test(total_csv, output_dir):
# get total_csv
data = pandas.read_csv(total_csv, encoding='utf-8', sep='\t')
x, y = data.iloc[:, :2], data.iloc[:, 2:]
# split train/dev/test0
x_train, x_rest, y_train, y_rest = train_test_split(x, y, test_size=0.1, random_state=0)
x_test, x_dev, y_test, y_dev = train_test_split(x_rest, y_rest, test_size=0.9, random_state=0)
# add ProsodyLabel
x_train.insert(2, 'transcript', y_train)
x_test.insert(2, 'transcript', y_test)
x_dev.insert(2, 'transcript', y_dev)
# get csv_path
train_csv_path = os.path.join(output_dir, 'train.csv')
dev_csv_path = os.path.join(output_dir, 'dev.csv')
test_csv_path = os.path.join(output_dir, 'test.csv')
# generate csv
x_train.to_csv(train_csv_path, index=False, sep="\t")
logging.info("Successfully generated csv file {}".format(train_csv_path))
x_dev.to_csv(dev_csv_path, index=False, sep="\t")
logging.info("Successfully generated csv file {}".format(dev_csv_path))
x_test.to_csv(test_csv_path, index=False, sep="\t")
logging.info("Successfully generated csv file {}".format(test_csv_path))
def processor(dircetory):
""" download and process """
#logging.info("Downloading the dataset may take a long time so you can download it in another way and move it to the dircetory {}".format(dircetory))
LJSpeech = os.path.join(dircetory, "LJSpeech-1.1.tar.bz2")
if os.path.exists(LJSpeech):
logging.info("{} already exist".format(LJSpeech))
else:
download_and_extract(dircetory, URL)
# get total_csv
logging.info("Processing the LJspeech total.csv in {}".format(dircetory))
total_csv_path = os.path.join(dircetory, "total.csv")
convert_audio_and_split_transcript(dircetory, total_csv_path)
split_train_dev_test(total_csv_path, dircetory)
logging.info("Finished processing LJspeech csv ")
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
DIR = sys.argv[1]
processor(DIR)
filtered:remove_function_no_docstring: 2,812 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 192
hexsha: 09b04334000e84dd407ae506dce11f9969eacf3d | size: 666 | ext: py | lang: Python
max_stars_repo_path: yossarian/book_groups/migrations/0003_auto_20151231_1605.py | max_stars_repo_name: avinassh/yossarian | max_stars_repo_head_hexsha: b485da0669d87ad29f57ba2a4a446131aaf820a6
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: yossarian/book_groups/migrations/0003_auto_20151231_1605.py | max_issues_repo_name: avinassh/yossarian | max_issues_repo_head_hexsha: b485da0669d87ad29f57ba2a4a446131aaf820a6
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: yossarian/book_groups/migrations/0003_auto_20151231_1605.py | max_forks_repo_name: avinassh/yossarian | max_forks_repo_head_hexsha: b485da0669d87ad29f57ba2a4a446131aaf820a6
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-31 16:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
avg_line_length: 25.615385 | max_line_length: 101 | alphanum_fraction: 0.62012
original_content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-31 16:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('book_groups', '0002_progress'),
]
operations = [
migrations.AlterField(
model_name='bookgroup',
name='book',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='books.Book'),
),
migrations.AlterUniqueTogether(
name='progress',
unique_together=set([('book_group', 'user')]),
),
]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 457 | filtered:remove_delete_markers: 23
hexsha: f3f8f0663273ae09a08d463054d3e72903562007 | size: 1,347 | ext: py | lang: Python
max_stars_repo_path: transactions/accounts/migrations/0001_initial.py | max_stars_repo_name: akash-dev-github/Transactions | max_stars_repo_head_hexsha: 7f1b8897d914a1cf297aeff750c197d21ce98ca8
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: transactions/accounts/migrations/0001_initial.py | max_issues_repo_name: akash-dev-github/Transactions | max_issues_repo_head_hexsha: 7f1b8897d914a1cf297aeff750c197d21ce98ca8
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: transactions/accounts/migrations/0001_initial.py | max_forks_repo_name: akash-dev-github/Transactions | max_forks_repo_head_hexsha: 7f1b8897d914a1cf297aeff750c197d21ce98ca8
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-09 19:24
from __future__ import unicode_literals
import datetime
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
avg_line_length: 35.447368 | max_line_length: 130 | alphanum_fraction: 0.614699
original_content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-09 19:24
from __future__ import unicode_literals
import datetime
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True)),
('added_dttm', models.DateTimeField(default=datetime.datetime.now, editable=False)),
('last_modified_dttm', models.DateTimeField(default=datetime.datetime.now)),
('balance', models.DecimalField(decimal_places=8, max_digits=32)),
('currency', models.CharField(choices=[('BTC', 'bitcoin'), ('ETH', 'etherium'), ('PHP', 'pesos')], max_length=8)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
'db_table': 'account',
},
),
]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 1,085 | filtered:remove_delete_markers: 23
hexsha: 3d1c5ee77973eec7eef67ddca34a8343a0cf3724 | size: 6,691 | ext: py | lang: Python
max_stars_repo_path: custom_components/edgeos/clients/web_socket.py | max_stars_repo_name: kcleong/homeassistant-config | max_stars_repo_head_hexsha: 15b7bc75f5d1055d8620ced87eed9d563475296d
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: custom_components/edgeos/clients/web_socket.py | max_issues_repo_name: kcleong/homeassistant-config | max_issues_repo_head_hexsha: 15b7bc75f5d1055d8620ced87eed9d563475296d
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: custom_components/edgeos/clients/web_socket.py | max_forks_repo_name: kcleong/homeassistant-config | max_forks_repo_head_hexsha: 15b7bc75f5d1055d8620ced87eed9d563475296d
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""
This component provides support for Home Automation Manager (HAM).
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/edgeos/
"""
import asyncio
import json
import logging
import re
from typing import Optional
from urllib.parse import urlparse
import aiohttp
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from ..helpers.const import *
from ..models.config_data import ConfigData
REQUIREMENTS = ["aiohttp"]
_LOGGER = logging.getLogger(__name__)
avg_line_length: 28.47234 | max_line_length: 87 | alphanum_fraction: 0.606337
original_content:
"""
This component provides support for Home Automation Manager (HAM).
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/edgeos/
"""
import asyncio
import json
import logging
import re
from typing import Optional
from urllib.parse import urlparse
import aiohttp
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from ..helpers.const import *
from ..models.config_data import ConfigData
REQUIREMENTS = ["aiohttp"]
_LOGGER = logging.getLogger(__name__)
class EdgeOSWebSocket:
def __init__(self, hass, config_manager, topics, edgeos_callback):
self._config_manager = config_manager
self._last_update = datetime.now()
self._edgeos_callback = edgeos_callback
self._hass = hass
self._session_id = None
self._topics = topics
self._session = None
self._ws = None
self._pending_payloads = []
self.shutting_down = False
self._is_connected = False
@property
def config_data(self) -> Optional[ConfigData]:
if self._config_manager is not None:
return self._config_manager.data
return None
@property
def ws_url(self):
url = urlparse(self.config_data.url)
ws_url = WEBSOCKET_URL_TEMPLATE.format(url.netloc)
return ws_url
async def initialize(self, cookies, session_id):
_LOGGER.debug("Initializing WS connection")
try:
self._is_connected = False
self.shutting_down = False
self._session_id = session_id
if self._hass is None:
self._session = aiohttp.client.ClientSession(cookies=cookies)
else:
self._session = async_create_clientsession(
hass=self._hass, cookies=cookies
)
except Exception as ex:
_LOGGER.warning(f"Failed to create session of EdgeOS WS, Error: {str(ex)}")
try:
async with self._session.ws_connect(
self.ws_url,
origin=self.config_data.url,
ssl=False,
autoclose=True,
max_msg_size=MAX_MSG_SIZE,
timeout=SCAN_INTERVAL_WS_TIMEOUT,
) as ws:
self._is_connected = True
self._ws = ws
await self.listen()
except Exception as ex:
if self._session is not None and self._session.closed:
_LOGGER.info(f"WS Session closed")
else:
_LOGGER.warning(f"Failed to connect EdgeOS WS, Error: {ex}")
self._is_connected = False
_LOGGER.info("WS Connection terminated")
@property
def is_initialized(self):
is_initialized = self._session is not None and not self._session.closed
return is_initialized
@property
def last_update(self):
result = self._last_update
return result
def parse_message(self, message):
parsed = False
try:
message = message.replace(NEW_LINE, EMPTY_STRING)
message = re.sub(BEGINS_WITH_SIX_DIGITS, EMPTY_STRING, message)
if len(self._pending_payloads) > 0:
message_previous = "".join(self._pending_payloads)
message = f"{message_previous}{message}"
if len(message) > 0:
payload_json = json.loads(message)
self._edgeos_callback(payload_json)
parsed = True
else:
_LOGGER.debug("Parse message skipped (Empty)")
except Exception as ex:
_LOGGER.debug(f"Parse message failed due to partial payload, Error: {ex}")
finally:
if parsed or len(self._pending_payloads) > MAX_PENDING_PAYLOADS:
self._pending_payloads = []
else:
self._pending_payloads.append(message)
async def async_send_heartbeat(self):
_LOGGER.debug(f"Keep alive message sent")
data = self.get_keep_alive_data()
if self._is_connected:
await self._ws.send_str(data)
async def listen(self):
_LOGGER.info(f"Starting to listen connected")
subscription_data = self.get_subscription_data()
await self._ws.send_str(subscription_data)
_LOGGER.info("Subscribed to WS payloads")
async for msg in self._ws:
continue_to_next = self.handle_next_message(msg)
if (
not continue_to_next
or not self.is_initialized
or not self._is_connected
):
break
_LOGGER.info(f"Stop listening")
def handle_next_message(self, msg):
_LOGGER.debug(f"Starting to handle next message")
result = False
if msg.type in (
aiohttp.WSMsgType.CLOSE,
aiohttp.WSMsgType.CLOSED,
aiohttp.WSMsgType.CLOSING,
):
_LOGGER.info("Connection closed (By Message Close)")
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.warning(f"Connection error, Description: {self._ws.exception()}")
else:
if self.config_data.log_incoming_messages:
_LOGGER.debug(f"New message received: {str(msg)}")
self._last_update = datetime.now()
if msg.data == "close":
result = False
else:
self.parse_message(msg.data)
result = True
return result
def disconnect(self):
self._is_connected = False
async def close(self):
_LOGGER.info("Closing connection to WS")
self._session_id = None
self._is_connected = False
if self._ws is not None:
await self._ws.close()
await asyncio.sleep(DISCONNECT_INTERVAL)
self._ws = None
@staticmethod
def get_keep_alive_data():
content = "{CLIENT_PING}"
_LOGGER.debug(f"Keep alive data to be sent: {content}")
return content
def get_subscription_data(self):
topics_to_subscribe = [{WS_TOPIC_NAME: topic} for topic in self._topics]
topics_to_unsubscribe = []
data = {
WS_TOPIC_SUBSCRIBE: topics_to_subscribe,
WS_TOPIC_UNSUBSCRIBE: topics_to_unsubscribe,
WS_SESSION_ID: self._session_id,
}
content = json.dumps(data, separators=(STRING_COMMA, STRING_COLON))
content_length = len(content)
data = f"{content_length}\n{content}"
_LOGGER.debug(f"Subscription data to be sent: {data}")
return data
filtered:remove_function_no_docstring: 5,670 | filtered:remove_class_no_docstring: 452 | filtered:remove_delete_markers: 23
hexsha: 9ca3fcb4f36e750dc158f22eee6d8701f2799cd8 | size: 1,918 | ext: py | lang: Python
max_stars_repo_path: venv/lib/python3.8/site-packages/azureml/_base_sdk_common/workspace/models/linked_service_props.py | max_stars_repo_name: amcclead7336/Enterprise_Data_Science_Final | max_stars_repo_head_hexsha: ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
max_stars_repo_licenses: ["Unlicense", "MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: venv/lib/python3.8/site-packages/azureml/_base_sdk_common/workspace/models/linked_service_props.py | max_issues_repo_name: amcclead7336/Enterprise_Data_Science_Final | max_issues_repo_head_hexsha: ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
max_issues_repo_licenses: ["Unlicense", "MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: venv/lib/python3.8/site-packages/azureml/_base_sdk_common/workspace/models/linked_service_props.py | max_forks_repo_name: amcclead7336/Enterprise_Data_Science_Final | max_forks_repo_head_hexsha: ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
max_forks_repo_licenses: ["Unlicense", "MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2021-05-23T16:46:31.000Z | max_forks_repo_forks_event_max_datetime: 2021-05-26T23:51:09.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LinkedServiceProps(Model):
"""LinkedService specific properties.
:param linked_service_resource_id: ResourceId of the link target of the
linked service.
:type linked_service_resource_id: str
:param link_type: Type of the link target. Possible values include:
'Synapse'
:type link_type: str or ~_restclient.models.LinkedServiceLinkType
:param created_time: The creation time of the linked service.
:type created_time: datetime
:param modified_time: The last modified time of the linked service.
:type modified_time: datetime
"""
_validation = {
'linked_service_resource_id': {'required': True},
}
_attribute_map = {
'linked_service_resource_id': {'key': 'linkedServiceResourceId', 'type': 'str'},
'link_type': {'key': 'linkType', 'type': 'LinkedServiceLinkType'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
}
avg_line_length: 40.808511 | max_line_length: 107 | alphanum_fraction: 0.637643
original_content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LinkedServiceProps(Model):
"""LinkedService specific properties.
:param linked_service_resource_id: ResourceId of the link target of the
linked service.
:type linked_service_resource_id: str
:param link_type: Type of the link target. Possible values include:
'Synapse'
:type link_type: str or ~_restclient.models.LinkedServiceLinkType
:param created_time: The creation time of the linked service.
:type created_time: datetime
:param modified_time: The last modified time of the linked service.
:type modified_time: datetime
"""
_validation = {
'linked_service_resource_id': {'required': True},
}
_attribute_map = {
'linked_service_resource_id': {'key': 'linkedServiceResourceId', 'type': 'str'},
'link_type': {'key': 'linkType', 'type': 'LinkedServiceLinkType'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
}
def __init__(self, linked_service_resource_id, link_type=None, created_time=None, modified_time=None):
super(LinkedServiceProps, self).__init__()
self.linked_service_resource_id = linked_service_resource_id
self.link_type = link_type
self.created_time = created_time
self.modified_time = modified_time
filtered:remove_function_no_docstring: 325 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 29
hexsha: 626ab922c10d3f79f86d1f22251f5d6b2a46edfe | size: 963 | ext: py | lang: Python
max_stars_repo_path: notebooks/1.0-vg-runtime-simulation/timing.py | max_stars_repo_name: v715/py-U-Rerf | max_stars_repo_head_hexsha: d1821ce95a3ccc3345faa673371f7a8e9a797f72
max_stars_repo_licenses: ["FTL"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2018-09-18T00:06:46.000Z | max_stars_repo_stars_event_max_datetime: 2018-09-18T12:59:38.000Z
max_issues_repo_path: notebooks/1.0-vg-runtime-simulation/timing.py | max_issues_repo_name: v715/py-U-Rerf | max_issues_repo_head_hexsha: d1821ce95a3ccc3345faa673371f7a8e9a797f72
max_issues_repo_licenses: ["FTL"] | max_issues_count: 17 | max_issues_repo_issues_event_min_datetime: 2018-09-17T23:50:25.000Z | max_issues_repo_issues_event_max_datetime: 2018-10-12T19:30:21.000Z
max_forks_repo_path: notebooks/1.0-vg-runtime-simulation/timing.py | max_forks_repo_name: v715/py-U-Rerf | max_forks_repo_head_hexsha: d1821ce95a3ccc3345faa673371f7a8e9a797f72
max_forks_repo_licenses: ["FTL"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
# graph.py
# Created by Vivek Gopalakrishnan on 2018-11-13.
# Email: vgopala4@jhu.edu
# Copyright (c) 2018. All rights reserved.
import timeit
from src.features.summary import Stats
from src.random.bernoulli import RandomGraph
def measure_runtime(n, p, number=5):
"""
Calculates the runtime for a given graph.
Does not time the functions: 'khop_locality', 'scan_statistic'
"""
# Initialize graph and stats class
A = RandomGraph(int(n), p)
s = Stats(A)
public_method_names = [method for method in dir(s) if callable(
getattr(s, method)) if not method.startswith('_')]
for method in ['return_stats', 'khop_locality', 'scan_statistic']:
public_method_names.remove(method)
# Dictionary for holding results
results = [n, p]
# Runtime
for method in public_method_names:
results += [timeit.timeit(lambda: getattr(s, method)(), number=number)]
return results
avg_line_length: 26.027027 | max_line_length: 79 | alphanum_fraction: 0.686397
original_content:
#!/usr/bin/env python
# graph.py
# Created by Vivek Gopalakrishnan on 2018-11-13.
# Email: vgopala4@jhu.edu
# Copyright (c) 2018. All rights reserved.
import timeit
from src.features.summary import Stats
from src.random.bernoulli import RandomGraph
def measure_runtime(n, p, number=5):
"""
Calculates the runtime for a given graph.
Does not time the functions: 'khop_locality', 'scan_statistic'
"""
# Initialize graph and stats class
A = RandomGraph(int(n), p)
s = Stats(A)
public_method_names = [method for method in dir(s) if callable(
getattr(s, method)) if not method.startswith('_')]
for method in ['return_stats', 'khop_locality', 'scan_statistic']:
public_method_names.remove(method)
# Dictionary for holding results
results = [n, p]
# Runtime
for method in public_method_names:
results += [timeit.timeit(lambda: getattr(s, method)(), number=number)]
return results
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 8d1f18ed4554971d39a164bbe0c71bd5477eddf5 | size: 1,360 | ext: py | lang: Python
max_stars_repo_path: NodeClassification/ADMM/ADMM/main_admm_auto_tune.py | max_stars_repo_name: x-zho14/Unified-LTH-GNN | max_stars_repo_head_hexsha: edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55
max_stars_repo_licenses: ["MIT"] | max_stars_count: 29 | max_stars_repo_stars_event_min_datetime: 2021-02-17T02:46:54.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-18T02:09:03.000Z
max_issues_repo_path: NodeClassification/ADMM/ADMM/main_admm_auto_tune.py | max_issues_repo_name: x-zho14/Unified-LTH-GNN | max_issues_repo_head_hexsha: edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55
max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-09-03T13:30:50.000Z | max_issues_repo_issues_event_max_datetime: 2021-09-03T13:30:50.000Z
max_forks_repo_path: NodeClassification/ADMM/ADMM/main_admm_auto_tune.py | max_forks_repo_name: x-zho14/Unified-LTH-GNN | max_forks_repo_head_hexsha: edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55
max_forks_repo_licenses: ["MIT"] | max_forks_count: 10 | max_forks_repo_forks_event_min_datetime: 2021-04-01T16:27:03.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-07T09:20:38.000Z
content:
import os
import numpy as np
learning_rate = [0.01, 0.001]
prune_ratio = 30
ADMM_times = [2,3,4,5,6,7]
Total_epochs = [10,30,40,50,60]
target_accuracy = 0.76
count = 0
highest_acc = 0
for i in range(len(learning_rate)):
for j in range(len(ADMM_times)):
for k in range(len(Total_epochs)):
lr = learning_rate[i]
admm = ADMM_times[j]
epoch = Total_epochs[k]
#linux
#os.system('rm '+"log"+str(count)+".txt")
#windows
os.system('del '+"log"+str(count)+".txt")
os.system("python train-auto-admm-tuneParameter.py"
+" --target_acc="+str(target_accuracy)
+" --prune_ratio="+str(prune_ratio)
+" --count=" + str(count)
+" --learning_rate="+str(lr)
+" --ADMM="+str(admm)
+" --epochs="+str(epoch)
+" >>log"+str(count)+".txt")
f = open("log" + str(count) + ".txt")
for line2 in f:
if "Finally Test set results" in line2:
res = line2.split()
if float(res[7]) > highest_acc:
highest_acc = float(res[7])
count+=1
print("highest accuracy only train with pruned adjacency + weights: ", highest_acc)
avg_line_length: 35.789474 | max_line_length: 83 | alphanum_fraction: 0.491912
original_content:
import os
import numpy as np
learning_rate = [0.01, 0.001]
prune_ratio = 30
ADMM_times = [2,3,4,5,6,7]
Total_epochs = [10,30,40,50,60]
target_accuracy = 0.76
count = 0
highest_acc = 0
for i in range(len(learning_rate)):
for j in range(len(ADMM_times)):
for k in range(len(Total_epochs)):
lr = learning_rate[i]
admm = ADMM_times[j]
epoch = Total_epochs[k]
#linux
#os.system('rm '+"log"+str(count)+".txt")
#windows
os.system('del '+"log"+str(count)+".txt")
os.system("python train-auto-admm-tuneParameter.py"
+" --target_acc="+str(target_accuracy)
+" --prune_ratio="+str(prune_ratio)
+" --count=" + str(count)
+" --learning_rate="+str(lr)
+" --ADMM="+str(admm)
+" --epochs="+str(epoch)
+" >>log"+str(count)+".txt")
f = open("log" + str(count) + ".txt")
for line2 in f:
if "Finally Test set results" in line2:
res = line2.split()
if float(res[7]) > highest_acc:
highest_acc = float(res[7])
count+=1
print("highest accuracy only train with pruned adjacency + weights: ", highest_acc)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 7dc23fe92ba79adcfc96da36352928d104bdba79 | size: 490 | ext: py | lang: Python
max_stars_repo_path: apps/users/admin.py | max_stars_repo_name: Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | max_stars_repo_head_hexsha: 4e374b4d52dfb9039ebe543e7f27682189022307
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2015-04-06T15:20:29.000Z | max_stars_repo_stars_event_max_datetime: 2016-12-30T12:25:11.000Z
max_issues_repo_path: apps/users/admin.py | max_issues_repo_name: Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | max_issues_repo_head_hexsha: 4e374b4d52dfb9039ebe543e7f27682189022307
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2019-02-17T17:38:02.000Z | max_issues_repo_issues_event_max_datetime: 2019-03-28T03:49:16.000Z
max_forks_repo_path: apps/users/admin.py | max_forks_repo_name: Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | max_forks_repo_head_hexsha: 4e374b4d52dfb9039ebe543e7f27682189022307
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2019-03-28T03:49:18.000Z | max_forks_repo_forks_event_max_datetime: 2019-03-28T03:49:18.000Z
content:
from django.contrib import admin
from tower import ugettext_lazy as _
from users.models import Profile, Link
username = lambda u: u.user.username
username.short_description = _('Username')
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Link, LinkAdmin)
avg_line_length: 21.304348 | max_line_length: 42 | alphanum_fraction: 0.734694
original_content:
from django.contrib import admin
from tower import ugettext_lazy as _
from users.models import Profile, Link
username = lambda u: u.user.username
username.short_description = _('Username')
class ProfileAdmin(admin.ModelAdmin):
list_display = (username, 'name')
search_fields = ('name',)
class LinkAdmin(admin.ModelAdmin):
list_display = ('name', 'url')
search_fields = ('name', 'url')
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Link, LinkAdmin)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 168 | filtered:remove_delete_markers: 46
hexsha: 324d309f43981206af584f6bc12c55d7e3d30736 | size: 4,574 | ext: py | lang: Python
max_stars_repo_path: IMU/VTK-6.2.0/Filters/General/Testing/Python/TestMultiBlockStreamer.py | max_stars_repo_name: timkrentz/SunTracker | max_stars_repo_head_hexsha: 9a189cc38f45e5fbc4e4c700d7295a871d022795
max_stars_repo_licenses: ["MIT"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2016-03-30T14:31:52.000Z | max_stars_repo_stars_event_max_datetime: 2019-02-02T05:01:32.000Z
max_issues_repo_path: IMU/VTK-6.2.0/Filters/General/Testing/Python/TestMultiBlockStreamer.py | max_issues_repo_name: timkrentz/SunTracker | max_issues_repo_head_hexsha: 9a189cc38f45e5fbc4e4c700d7295a871d022795
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: IMU/VTK-6.2.0/Filters/General/Testing/Python/TestMultiBlockStreamer.py | max_forks_repo_name: timkrentz/SunTracker | max_forks_repo_head_hexsha: 9a189cc38f45e5fbc4e4c700d7295a871d022795
max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2019-08-30T23:36:13.000Z | max_forks_repo_forks_event_max_datetime: 2019-11-08T16:52:01.000Z
content:
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# we need to use composite data pipeline with multiblock datasets
alg = vtk.vtkAlgorithm()
pip = vtk.vtkCompositeDataPipeline()
alg.SetDefaultExecutivePrototype(pip)
#del pip
Ren1 = vtk.vtkRenderer()
Ren1.SetBackground(0.33, 0.35, 0.43)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(Ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
Plot3D0 = vtk.vtkMultiBlockPLOT3DReader()
Plot3D0.SetFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
Plot3D0.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
Plot3D0.SetBinaryFile(1)
Plot3D0.SetMultiGrid(0)
Plot3D0.SetHasByteCount(0)
Plot3D0.SetIBlanking(0)
Plot3D0.SetTwoDimensionalGeometry(0)
Plot3D0.SetForceRead(0)
Plot3D0.SetByteOrder(0)
Plot3D0.Update()
output = Plot3D0.GetOutput().GetBlock(0)
Geometry5 = vtk.vtkStructuredGridOutlineFilter()
Geometry5.SetInputData(output)
Mapper5 = vtk.vtkPolyDataMapper()
Mapper5.SetInputConnection(Geometry5.GetOutputPort())
Mapper5.SetImmediateModeRendering(1)
Mapper5.UseLookupTableScalarRangeOn()
Mapper5.SetScalarVisibility(0)
Mapper5.SetScalarModeToDefault()
Actor5 = vtk.vtkActor()
Actor5.SetMapper(Mapper5)
Actor5.GetProperty().SetRepresentationToSurface()
Actor5.GetProperty().SetInterpolationToGouraud()
Actor5.GetProperty().SetAmbient(0.15)
Actor5.GetProperty().SetDiffuse(0.85)
Actor5.GetProperty().SetSpecular(0.1)
Actor5.GetProperty().SetSpecularPower(100)
Actor5.GetProperty().SetSpecularColor(1, 1, 1)
Actor5.GetProperty().SetColor(1, 1, 1)
Ren1.AddActor(Actor5)
ExtractGrid0 = vtk.vtkExtractGrid()
ExtractGrid0.SetInputData(output)
ExtractGrid0.SetVOI(0, 14, 0, 32, 0, 24)
ExtractGrid0.SetSampleRate(1, 1, 1)
ExtractGrid0.SetIncludeBoundary(0)
ExtractGrid1 = vtk.vtkExtractGrid()
ExtractGrid1.SetInputData(output)
ExtractGrid1.SetVOI(14, 29, 0, 32, 0, 24)
ExtractGrid1.SetSampleRate(1, 1, 1)
ExtractGrid1.SetIncludeBoundary(0)
ExtractGrid2 = vtk.vtkExtractGrid()
ExtractGrid2.SetInputData(output)
ExtractGrid2.SetVOI(29, 56, 0, 32, 0, 24)
ExtractGrid2.SetSampleRate(1, 1, 1)
ExtractGrid2.SetIncludeBoundary(0)
LineSourceWidget0 = vtk.vtkLineSource()
LineSourceWidget0.SetPoint1(3.05638, -3.00497, 28.2211)
LineSourceWidget0.SetPoint2(3.05638, 3.95916, 28.2211)
LineSourceWidget0.SetResolution(20)
mbds = vtk.vtkMultiBlockDataSet()
mbds.SetNumberOfBlocks(3)
i = 0
while i < 3:
eval("ExtractGrid" + str(i)).Update()
exec("sg" + str(i) + " = vtk.vtkStructuredGrid()")
eval("sg" + str(i)).ShallowCopy(eval("ExtractGrid" + str(i)).GetOutput())
mbds.SetBlock(i, eval("sg" + str(i)))
i += 1
Stream0 = vtk.vtkStreamTracer()
Stream0.SetInputData(mbds)
Stream0.SetSourceConnection(LineSourceWidget0.GetOutputPort())
Stream0.SetIntegrationStepUnit(2)
Stream0.SetMaximumPropagation(20)
Stream0.SetInitialIntegrationStep(0.5)
Stream0.SetIntegrationDirection(0)
Stream0.SetIntegratorType(0)
Stream0.SetMaximumNumberOfSteps(2000)
Stream0.SetTerminalSpeed(1e-12)
#del mbds
aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(Stream0.GetOutputPort())
aa.Assign("Normals", "NORMALS", "POINT_DATA")
Ribbon0 = vtk.vtkRibbonFilter()
Ribbon0.SetInputConnection(aa.GetOutputPort())
Ribbon0.SetWidth(0.1)
Ribbon0.SetAngle(0)
Ribbon0.SetDefaultNormal(0, 0, 1)
Ribbon0.SetVaryWidth(0)
LookupTable1 = vtk.vtkLookupTable()
LookupTable1.SetNumberOfTableValues(256)
LookupTable1.SetHueRange(0, 0.66667)
LookupTable1.SetSaturationRange(1, 1)
LookupTable1.SetValueRange(1, 1)
LookupTable1.SetTableRange(0.197813, 0.710419)
LookupTable1.SetVectorComponent(0)
LookupTable1.Build()
Mapper10 = vtk.vtkPolyDataMapper()
Mapper10.SetInputConnection(Ribbon0.GetOutputPort())
Mapper10.SetImmediateModeRendering(1)
Mapper10.UseLookupTableScalarRangeOn()
Mapper10.SetScalarVisibility(1)
Mapper10.SetScalarModeToUsePointFieldData()
Mapper10.SelectColorArray("Density")
Mapper10.SetLookupTable(LookupTable1)
Actor10 = vtk.vtkActor()
Actor10.SetMapper(Mapper10)
Actor10.GetProperty().SetRepresentationToSurface()
Actor10.GetProperty().SetInterpolationToGouraud()
Actor10.GetProperty().SetAmbient(0.15)
Actor10.GetProperty().SetDiffuse(0.85)
Actor10.GetProperty().SetSpecular(0)
Actor10.GetProperty().SetSpecularPower(1)
Actor10.GetProperty().SetSpecularColor(1, 1, 1)
Ren1.AddActor(Actor10)
iren.Initialize()
alg.SetDefaultExecutivePrototype(None)
#del alg
#iren.Start()
| 30.291391
| 78
| 0.776782
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# we need to use composite data pipeline with multiblock datasets
alg = vtk.vtkAlgorithm()
pip = vtk.vtkCompositeDataPipeline()
alg.SetDefaultExecutivePrototype(pip)
#del pip
Ren1 = vtk.vtkRenderer()
Ren1.SetBackground(0.33, 0.35, 0.43)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(Ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
Plot3D0 = vtk.vtkMultiBlockPLOT3DReader()
Plot3D0.SetFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
Plot3D0.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
Plot3D0.SetBinaryFile(1)
Plot3D0.SetMultiGrid(0)
Plot3D0.SetHasByteCount(0)
Plot3D0.SetIBlanking(0)
Plot3D0.SetTwoDimensionalGeometry(0)
Plot3D0.SetForceRead(0)
Plot3D0.SetByteOrder(0)
Plot3D0.Update()
output = Plot3D0.GetOutput().GetBlock(0)
Geometry5 = vtk.vtkStructuredGridOutlineFilter()
Geometry5.SetInputData(output)
Mapper5 = vtk.vtkPolyDataMapper()
Mapper5.SetInputConnection(Geometry5.GetOutputPort())
Mapper5.SetImmediateModeRendering(1)
Mapper5.UseLookupTableScalarRangeOn()
Mapper5.SetScalarVisibility(0)
Mapper5.SetScalarModeToDefault()
Actor5 = vtk.vtkActor()
Actor5.SetMapper(Mapper5)
Actor5.GetProperty().SetRepresentationToSurface()
Actor5.GetProperty().SetInterpolationToGouraud()
Actor5.GetProperty().SetAmbient(0.15)
Actor5.GetProperty().SetDiffuse(0.85)
Actor5.GetProperty().SetSpecular(0.1)
Actor5.GetProperty().SetSpecularPower(100)
Actor5.GetProperty().SetSpecularColor(1, 1, 1)
Actor5.GetProperty().SetColor(1, 1, 1)
Ren1.AddActor(Actor5)
ExtractGrid0 = vtk.vtkExtractGrid()
ExtractGrid0.SetInputData(output)
ExtractGrid0.SetVOI(0, 14, 0, 32, 0, 24)
ExtractGrid0.SetSampleRate(1, 1, 1)
ExtractGrid0.SetIncludeBoundary(0)
ExtractGrid1 = vtk.vtkExtractGrid()
ExtractGrid1.SetInputData(output)
ExtractGrid1.SetVOI(14, 29, 0, 32, 0, 24)
ExtractGrid1.SetSampleRate(1, 1, 1)
ExtractGrid1.SetIncludeBoundary(0)
ExtractGrid2 = vtk.vtkExtractGrid()
ExtractGrid2.SetInputData(output)
ExtractGrid2.SetVOI(29, 56, 0, 32, 0, 24)
ExtractGrid2.SetSampleRate(1, 1, 1)
ExtractGrid2.SetIncludeBoundary(0)
LineSourceWidget0 = vtk.vtkLineSource()
LineSourceWidget0.SetPoint1(3.05638, -3.00497, 28.2211)
LineSourceWidget0.SetPoint2(3.05638, 3.95916, 28.2211)
LineSourceWidget0.SetResolution(20)
mbds = vtk.vtkMultiBlockDataSet()
mbds.SetNumberOfBlocks(3)
i = 0
while i < 3:
eval("ExtractGrid" + str(i)).Update()
exec("sg" + str(i) + " = vtk.vtkStructuredGrid()")
eval("sg" + str(i)).ShallowCopy(eval("ExtractGrid" + str(i)).GetOutput())
mbds.SetBlock(i, eval("sg" + str(i)))
i += 1
Stream0 = vtk.vtkStreamTracer()
Stream0.SetInputData(mbds)
Stream0.SetSourceConnection(LineSourceWidget0.GetOutputPort())
Stream0.SetIntegrationStepUnit(2)
Stream0.SetMaximumPropagation(20)
Stream0.SetInitialIntegrationStep(0.5)
Stream0.SetIntegrationDirection(0)
Stream0.SetIntegratorType(0)
Stream0.SetMaximumNumberOfSteps(2000)
Stream0.SetTerminalSpeed(1e-12)
#del mbds
aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(Stream0.GetOutputPort())
aa.Assign("Normals", "NORMALS", "POINT_DATA")
Ribbon0 = vtk.vtkRibbonFilter()
Ribbon0.SetInputConnection(aa.GetOutputPort())
Ribbon0.SetWidth(0.1)
Ribbon0.SetAngle(0)
Ribbon0.SetDefaultNormal(0, 0, 1)
Ribbon0.SetVaryWidth(0)
LookupTable1 = vtk.vtkLookupTable()
LookupTable1.SetNumberOfTableValues(256)
LookupTable1.SetHueRange(0, 0.66667)
LookupTable1.SetSaturationRange(1, 1)
LookupTable1.SetValueRange(1, 1)
LookupTable1.SetTableRange(0.197813, 0.710419)
LookupTable1.SetVectorComponent(0)
LookupTable1.Build()
Mapper10 = vtk.vtkPolyDataMapper()
Mapper10.SetInputConnection(Ribbon0.GetOutputPort())
Mapper10.SetImmediateModeRendering(1)
Mapper10.UseLookupTableScalarRangeOn()
Mapper10.SetScalarVisibility(1)
Mapper10.SetScalarModeToUsePointFieldData()
Mapper10.SelectColorArray("Density")
Mapper10.SetLookupTable(LookupTable1)
Actor10 = vtk.vtkActor()
Actor10.SetMapper(Mapper10)
Actor10.GetProperty().SetRepresentationToSurface()
Actor10.GetProperty().SetInterpolationToGouraud()
Actor10.GetProperty().SetAmbient(0.15)
Actor10.GetProperty().SetDiffuse(0.85)
Actor10.GetProperty().SetSpecular(0)
Actor10.GetProperty().SetSpecularPower(1)
Actor10.GetProperty().SetSpecularColor(1, 1, 1)
Ren1.AddActor(Actor10)
iren.Initialize()
alg.SetDefaultExecutivePrototype(None)
#del alg
#iren.Start()
| 0
| 0
| 0
|
716976b200135720854972a197428b45a78f6344
| 207
|
py
|
Python
|
pdbuddy/formatters/simple.py
|
emou/pdbuddy
|
5708c44803e46d06aca02a0402ebaec0c5ae4634
|
[
"MIT"
] | null | null | null |
pdbuddy/formatters/simple.py
|
emou/pdbuddy
|
5708c44803e46d06aca02a0402ebaec0c5ae4634
|
[
"MIT"
] | null | null | null |
pdbuddy/formatters/simple.py
|
emou/pdbuddy
|
5708c44803e46d06aca02a0402ebaec0c5ae4634
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from pdbuddy.formatters.base import BaseFormatter
| 20.7
| 49
| 0.772947
|
from __future__ import absolute_import
from pdbuddy.formatters.base import BaseFormatter
class SimpleFormatter(BaseFormatter):
def __call__(self, frame, event, arg):
return str(frame.f_code)
| 50
| 16
| 50
|
26e8cceb26d881c4c50d1907d6a8246ef169ec29
| 786
|
py
|
Python
|
Search_4_letter-webApp/vsearch4web.py
|
dlouima/python_project
|
2f84c5131efccfa04a633a608605b957b20b5f7e
|
[
"Apache-2.0"
] | null | null | null |
Search_4_letter-webApp/vsearch4web.py
|
dlouima/python_project
|
2f84c5131efccfa04a633a608605b957b20b5f7e
|
[
"Apache-2.0"
] | null | null | null |
Search_4_letter-webApp/vsearch4web.py
|
dlouima/python_project
|
2f84c5131efccfa04a633a608605b957b20b5f7e
|
[
"Apache-2.0"
] | null | null | null |
import vsearch as vsearch
from flask import Flask, render_template, request, redirect
app= Flask(__name__)
#
# @app.route('/')
# def hello() -> str:
# return redirect('/entry')
@app.route('/search4', methods=['POST'])
@app.route('/')
@app.route('/entry')
if __name__ == '__main__':
app.run(debug=True)
| 22.457143
| 90
| 0.653944
|
import vsearch as vsearch
from flask import Flask, render_template, request, redirect
app= Flask(__name__)
#
# @app.route('/')
# def hello() -> str:
# return redirect('/entry')
@app.route('/search4', methods=['POST'])
def do_search():
phrase = request.form['phrase']
letters= request.form['letters']
title='Here are your results'
results= str(vsearch.search4letters(phrase, letters))
return render_template(
'results.html',
the_phrase = phrase,
the_letters = letters,
the_title = title,
the_results = results,
)
@app.route('/')
@app.route('/entry')
def entry_page():
return render_template('entry.html', the_title='Welcome to search4letter on the web!')
if __name__ == '__main__':
app.run(debug=True)
| 423
| 0
| 44
|
a0f8dba11d5c9d23036254d6bc2c8e4009334ab5
| 580
|
py
|
Python
|
src/tree/0669.trim-a-binary-search-tree/trim-a-binary-search-tree.py
|
lyphui/Just-Code
|
e0c3c3ecb67cb805080ff686e88522b2bffe7741
|
[
"MIT"
] | 782
|
2019-11-19T08:20:49.000Z
|
2022-03-25T06:59:09.000Z
|
src/0669.trim-a-binary-search-tree/trim-a-binary-search-tree.py
|
Heitao5200/Just-Code
|
5bb3ee485a103418e693b7ec8e26dc84f3691c79
|
[
"MIT"
] | 1
|
2021-03-04T12:21:01.000Z
|
2021-03-05T01:23:54.000Z
|
src/0669.trim-a-binary-search-tree/trim-a-binary-search-tree.py
|
Heitao5200/Just-Code
|
5bb3ee485a103418e693b7ec8e26dc84f3691c79
|
[
"MIT"
] | 155
|
2019-11-20T08:20:42.000Z
|
2022-03-19T07:28:09.000Z
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 27.619048
| 66
| 0.532759
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def trimBST(self, root: TreeNode, L: int, R: int) -> TreeNode:
if not root:
return None
if root.val<L:
return self.trimBST(root.right, L,R)
if root.val>R:
return self.trimBST(root.left, L,R)
root.left = self.trimBST(root.left, L,R)
root.right = self.trimBST(root.right, L,R)
return root
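# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# Assuming the TreeNode class from the comment at the top of this file, trimming
# the tree 3 -> (left 0 -> right 2, right 4) to the range [1, 3] keeps only 3 and 2:
#   root = TreeNode(3); root.left = TreeNode(0)
#   root.left.right = TreeNode(2); root.right = TreeNode(4)
#   trimmed = Solution().trimBST(root, 1, 3)
#   (trimmed.val, trimmed.left.val, trimmed.right)   # -> (3, 2, None)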
| 376
| -6
| 49
|
2d982d9e3ea381e0c12055f558db9cd1ac20e3d2
| 9,925
|
py
|
Python
|
set_up_grasp_models/check_models/thermodynamics_checks.py
|
martamatos/set_up_grasp_models
|
0028f063c41104e3c0404956aa225e76aa6ac983
|
[
"MIT"
] | null | null | null |
set_up_grasp_models/check_models/thermodynamics_checks.py
|
martamatos/set_up_grasp_models
|
0028f063c41104e3c0404956aa225e76aa6ac983
|
[
"MIT"
] | 5
|
2019-05-14T17:05:41.000Z
|
2019-05-29T13:17:11.000Z
|
set_up_grasp_models/check_models/thermodynamics_checks.py
|
martamatos/set_up_grasp_models
|
0028f063c41104e3c0404956aa225e76aa6ac983
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
def calculate_dG(data_dict: dict, gas_constant: float, temperature: float, rxn_order: list = None) -> tuple:
"""
Given a dictionary representing a GRASP input file, calculates the minimum and maximum reaction dGs based on the
standard dGs in thermoRxns and metabolite concentrations in thermoMets.
It also calculates the mass-action ratio and the part of the dG based on the mass-action ratio.
Args:
data_dict: a dictionary that represents the excel file with the GRASP model.
gas_constant: the gas constant to calculate the Gibbs energy.
temperature: the temperature to calculate the Gibbs energy.
rxn_order: a list with the reactions order (optional).
Returns:
Mass action ratio dataframe, dG_Q dataframe, Gibbs energies dataframe.
"""
dG_Q_df = pd.DataFrame()
dG_df = pd.DataFrame()
ma_df = pd.DataFrame()
stoic_df = data_dict['stoic']
mets_conc_df = data_dict['thermoMets']
mets_conc_df['mean (M)'] = (mets_conc_df['min (M)'] + mets_conc_df['max (M)']) / 2.
dG_std_df = data_dict['thermoRxns']
dG_std_df['∆Gr_mean'] = (dG_std_df['∆Gr\'_min (kJ/mol)'] + dG_std_df['∆Gr\'_max (kJ/mol)']) / 2.
rxn_names = stoic_df.index.values
stoic_matrix = stoic_df.values
min_met_conc = mets_conc_df['min (M)'].values
max_met_conc = mets_conc_df['max (M)'].values
dG_list_mean, dG_Q_list_mean, ma_ratio_list_mean = _get_dG_list(rxn_names, stoic_matrix,
mets_conc_df['mean (M)'].values,
mets_conc_df['mean (M)'].values,
dG_std_df['∆Gr_mean'].values,
gas_constant, temperature)
dG_list_min, dG_Q_list_min, ma_ratio_list_min = _get_dG_list(rxn_names, stoic_matrix, max_met_conc, min_met_conc,
dG_std_df['∆Gr\'_min (kJ/mol)'].values,
gas_constant, temperature)
dG_list_max, dG_Q_list_max, ma_ratio_list_max = _get_dG_list(rxn_names, stoic_matrix, min_met_conc, max_met_conc,
dG_std_df['∆Gr\'_max (kJ/mol)'].values,
gas_constant, temperature)
ma_df['ma_min'] = ma_ratio_list_min
ma_df['ma_mean'] = ma_ratio_list_mean
ma_df['ma_max'] = ma_ratio_list_max
dG_Q_df['∆G_Q_min'] = dG_Q_list_min
dG_Q_df['∆G_Q_mean'] = dG_Q_list_mean
dG_Q_df['∆G_Q_max'] = dG_Q_list_max
dG_df['∆G_min'] = dG_list_min
dG_df['∆G_mean'] = dG_list_mean
dG_df['∆G_max'] = dG_list_max
ma_df.index = rxn_names
dG_Q_df.index = rxn_names
dG_df.index = rxn_names
if rxn_order:
ma_df = ma_df.reindex(rxn_order)
dG_Q_df = dG_Q_df.reindex(rxn_order)
dG_df = dG_df.reindex(rxn_order)
return ma_df, dG_Q_df, dG_df
def get_robust_fluxes(data_dict: dict, rxn_order: list = None) -> pd.DataFrame:
"""
    Given a dictionary representing a GRASP input file, it calculates the robust fluxes (almost) as in GRASP,
    provided the system is fully determined.
Args:
data_dict: path to the GRASP input file
rxn_order: a list with the reactions order (optional)
Returns:
fluxes_df: dataframe with flux mean and std values
"""
fluxes_df = pd.DataFrame()
stoic_balanced, rxn_list = _get_balanced_s_matrix(data_dict)
# n_reactions = len(rxn_order)
meas_rates_mean, meas_rates_std = _get_meas_rates(data_dict, rxn_list)
v_mean, v_std = _compute_robust_fluxes(stoic_balanced, meas_rates_mean, meas_rates_std, rxn_list)
fluxes_df['vref_mean (mmol/L/h)'] = v_mean
fluxes_df['vref_std (mmol/L/h)'] = v_std
fluxes_df.index = rxn_list
if rxn_order:
        fluxes_df = fluxes_df.reindex(rxn_order)
return fluxes_df
def check_thermodynamic_feasibility(data_dict: dict) -> tuple:
"""
    Given a dictionary representing a GRASP input file, it checks whether the reactions' dG values are compatible
    with the respective fluxes. It works both when all fluxes are specified in measRates and when robust fluxes are
    calculated for a fully determined system. If the fluxes are not fully specified and the system is not fully
    determined, it doesn't work.
Args:
data_dict: a dictionary representing a GRASP input file.
Returns:
Whether or not the model is thermodynamically feasible plus fluxes and Gibbs energies dataframes.
"""
print('\nChecking if fluxes and Gibbs energies are compatible.\n')
flag = False
temperature = 298 # in K
gas_constant = 8.314 * 10**-3 # in kJ K^-1 mol^-1
stoic_df = data_dict['stoic']
flux_df = data_dict['measRates']
ma_df, dG_Q_df, dG_df = calculate_dG(data_dict, gas_constant, temperature)
if len(stoic_df.index) != len(flux_df.index):
flux_df = get_robust_fluxes(data_dict)
for rxn in flux_df.index:
if flux_df.loc[rxn, 'vref_mean (mmol/L/h)'] > 0 and dG_df.loc[rxn, '∆G_min'] > 0:
print(f'The flux and ∆G range seem to be incompatible for reaction {rxn}')
flag = True
if flux_df.loc[rxn, 'vref_mean (mmol/L/h)'] < 0 and dG_df.loc[rxn, '∆G_max'] < 0:
print(f'The flux and ∆G range seem to be incompatible for reaction {rxn}')
flag = True
if flag is False:
print('Everything seems to be OK.')
return flag, flux_df, dG_df
| 38.173077
| 120
| 0.643829
|
import numpy as np
import pandas as pd
def _get_dG_list(rxn_names: list, stoic_matrix: np.ndarray, sub_conc: np.ndarray, prod_conc: np.ndarray,
dG_std: np.ndarray, gas_constant: float, temperature: float) -> tuple:
dG_list = []
dG_Q_list = []
ma_ratio_list = []
for rxn_i in range(len(rxn_names)):
rxn_subs_conc = np.sign(stoic_matrix[rxn_i, :]) * (sub_conc ** np.abs(stoic_matrix[rxn_i, :]))
rxn_prods_conc = np.sign(stoic_matrix[rxn_i, :]) * (prod_conc ** np.abs(stoic_matrix[rxn_i, :]))
subs_ind = np.where(rxn_subs_conc < 0)
subs_conc = rxn_subs_conc[subs_ind]
prods_ind = np.where(rxn_prods_conc > 0)
prods_conc = rxn_prods_conc[prods_ind]
subs_prod = np.abs(np.prod(subs_conc))
prods_prod = np.prod(prods_conc)
ma_ratio = prods_prod / subs_prod
dG_Q = gas_constant * temperature * np.log(ma_ratio)
dG = dG_std[rxn_i] + dG_Q
ma_ratio_list.append(ma_ratio)
dG_list.append(dG)
dG_Q_list.append(dG_Q)
return dG_list, dG_Q_list, ma_ratio_list
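# --- Editor's note: illustrative sketch, not part of the original file. ---
# _get_dG_list applies dG = dG_std + R*T*ln(Q), where Q is the mass-action ratio
# (product concentrations over substrate concentrations). With hypothetical numbers:
#   R, T = 8.314e-3, 298                    # kJ K^-1 mol^-1, K
#   dG_std, Q = -30.0, 10.0
#   dG = dG_std + R * T * np.log(Q)         # ≈ -24.3 kJ/mol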
def calculate_dG(data_dict: dict, gas_constant: float, temperature: float, rxn_order: list = None) -> tuple:
"""
Given a dictionary representing a GRASP input file, calculates the minimum and maximum reaction dGs based on the
standard dGs in thermoRxns and metabolite concentrations in thermoMets.
It also calculates the mass-action ratio and the part of the dG based on the mass-action ratio.
Args:
data_dict: a dictionary that represents the excel file with the GRASP model.
gas_constant: the gas constant to calculate the Gibbs energy.
temperature: the temperature to calculate the Gibbs energy.
rxn_order: a list with the reactions order (optional).
Returns:
Mass action ratio dataframe, dG_Q dataframe, Gibbs energies dataframe.
"""
dG_Q_df = pd.DataFrame()
dG_df = pd.DataFrame()
ma_df = pd.DataFrame()
stoic_df = data_dict['stoic']
mets_conc_df = data_dict['thermoMets']
mets_conc_df['mean (M)'] = (mets_conc_df['min (M)'] + mets_conc_df['max (M)']) / 2.
dG_std_df = data_dict['thermoRxns']
dG_std_df['∆Gr_mean'] = (dG_std_df['∆Gr\'_min (kJ/mol)'] + dG_std_df['∆Gr\'_max (kJ/mol)']) / 2.
rxn_names = stoic_df.index.values
stoic_matrix = stoic_df.values
min_met_conc = mets_conc_df['min (M)'].values
max_met_conc = mets_conc_df['max (M)'].values
dG_list_mean, dG_Q_list_mean, ma_ratio_list_mean = _get_dG_list(rxn_names, stoic_matrix,
mets_conc_df['mean (M)'].values,
mets_conc_df['mean (M)'].values,
dG_std_df['∆Gr_mean'].values,
gas_constant, temperature)
dG_list_min, dG_Q_list_min, ma_ratio_list_min = _get_dG_list(rxn_names, stoic_matrix, max_met_conc, min_met_conc,
dG_std_df['∆Gr\'_min (kJ/mol)'].values,
gas_constant, temperature)
dG_list_max, dG_Q_list_max, ma_ratio_list_max = _get_dG_list(rxn_names, stoic_matrix, min_met_conc, max_met_conc,
dG_std_df['∆Gr\'_max (kJ/mol)'].values,
gas_constant, temperature)
ma_df['ma_min'] = ma_ratio_list_min
ma_df['ma_mean'] = ma_ratio_list_mean
ma_df['ma_max'] = ma_ratio_list_max
dG_Q_df['∆G_Q_min'] = dG_Q_list_min
dG_Q_df['∆G_Q_mean'] = dG_Q_list_mean
dG_Q_df['∆G_Q_max'] = dG_Q_list_max
dG_df['∆G_min'] = dG_list_min
dG_df['∆G_mean'] = dG_list_mean
dG_df['∆G_max'] = dG_list_max
ma_df.index = rxn_names
dG_Q_df.index = rxn_names
dG_df.index = rxn_names
if rxn_order:
ma_df = ma_df.reindex(rxn_order)
dG_Q_df = dG_Q_df.reindex(rxn_order)
dG_df = dG_df.reindex(rxn_order)
return ma_df, dG_Q_df, dG_df
def _compute_robust_fluxes(stoic_matrix: np.ndarray, meas_rates: np.ndarray, meas_rates_std: np.ndarray,
rxn_list: list) -> tuple:
# Determine measured fluxes and decompose stoichiometric matrix
id_meas = np.where(meas_rates != 0)
id_unkn = np.where(meas_rates == 0)
stoic_meas = stoic_matrix[:, id_meas]
stoic_meas = np.array([row[0] for row in stoic_meas])
stoic_unkn = stoic_matrix[:, id_unkn]
stoic_unkn = np.array([row[0] for row in stoic_unkn])
# Initialize final fluxes
v_mean = np.zeros(np.size(meas_rates))
v_std = np.zeros(np.size(meas_rates))
# Compute estimate Rred
Dm = np.diag(meas_rates_std[id_meas] ** 2)
Rred = np.subtract(stoic_meas, np.matmul(np.matmul(stoic_unkn, np.linalg.pinv(stoic_unkn)), stoic_meas))
[u, singVals, vh] = np.linalg.svd(Rred)
singVals = np.abs(singVals)
zero_sing_vals = np.where(singVals > 10 ** -12)
# If the system is fully determined, compute as follows
if len(zero_sing_vals[0]) == 0:
v_mean[id_unkn] = -np.matmul(np.matmul(np.linalg.pinv(stoic_unkn), stoic_meas), meas_rates[id_meas])
if len(np.where(v_mean == 0)[0]) > len(id_meas[0]):
zero_flux_rxns = list(set(np.where(v_mean == 0)[0]).difference(set(id_meas[0])))
raise RuntimeError('According to compute robust fluxes, there are reactions with zero flux in the model.\n'+
'Those reactions should be removed.\n' +
f'The reactions are {np.array(rxn_list)[[zero_flux_rxns]]}')
v_mean[np.where(v_mean == 0)] = meas_rates[id_meas]
v_std[id_unkn] = np.diag(np.matmul(
np.matmul(np.matmul(np.matmul(np.linalg.pinv(stoic_unkn), stoic_meas), Dm), np.transpose(stoic_meas)),
np.transpose(np.linalg.pinv(stoic_unkn))))
v_std[np.where(v_std == 0)] = np.diag(Dm)
else:
print('System is not fully determined and the fluxes cannot be determined.')
exit()
v_std = np.sqrt(v_std) # Compute std
return v_mean, v_std
def _get_balanced_s_matrix(data_dict: dict) -> tuple:
stoic_df = data_dict['stoic']
stoic_matrix = np.transpose(stoic_df.values)
rxn_list = stoic_df.index.values
mets_df = data_dict['mets']
balanced_mets_ind = np.where(mets_df['balanced?'].values == 1)
stoic_balanced = stoic_matrix[balanced_mets_ind, :][0]
return stoic_balanced, rxn_list
def _get_meas_rates(data_dict: dict, rxn_list: list) -> tuple:
meas_rates_df = data_dict['measRates']
meas_rates_ids = meas_rates_df.index.values
meas_rates_mean = np.zeros(len(rxn_list))
meas_rates_std = np.zeros(len(rxn_list))
meas_rates = zip(list(np.nonzero(np.in1d(rxn_list, meas_rates_ids))[0]), list(meas_rates_ids))
for meas_rxn_ind, meas_rxn in meas_rates:
meas_rates_mean[meas_rxn_ind] = meas_rates_df.loc[meas_rxn, 'vref_mean (mmol/L/h)']
meas_rates_std[meas_rxn_ind] = meas_rates_df.loc[meas_rxn, 'vref_std (mmol/L/h)']
return meas_rates_mean, meas_rates_std
def get_robust_fluxes(data_dict: dict, rxn_order: list = None) -> pd.DataFrame:
"""
    Given a dictionary representing a GRASP input file, it calculates the robust fluxes (almost) as in GRASP,
    provided the system is fully determined.
Args:
data_dict: path to the GRASP input file
rxn_order: a list with the reactions order (optional)
Returns:
fluxes_df: dataframe with flux mean and std values
"""
fluxes_df = pd.DataFrame()
stoic_balanced, rxn_list = _get_balanced_s_matrix(data_dict)
# n_reactions = len(rxn_order)
meas_rates_mean, meas_rates_std = _get_meas_rates(data_dict, rxn_list)
v_mean, v_std = _compute_robust_fluxes(stoic_balanced, meas_rates_mean, meas_rates_std, rxn_list)
fluxes_df['vref_mean (mmol/L/h)'] = v_mean
fluxes_df['vref_std (mmol/L/h)'] = v_std
fluxes_df.index = rxn_list
if rxn_order:
        fluxes_df = fluxes_df.reindex(rxn_order)
return fluxes_df
def check_thermodynamic_feasibility(data_dict: dict) -> tuple:
"""
    Given a dictionary representing a GRASP input file, it checks whether the reactions' dG values are compatible
    with the respective fluxes. It works both when all fluxes are specified in measRates and when robust fluxes are
    calculated for a fully determined system. If the fluxes are not fully specified and the system is not fully
    determined, it doesn't work.
Args:
data_dict: a dictionary representing a GRASP input file.
Returns:
Whether or not the model is thermodynamically feasible plus fluxes and Gibbs energies dataframes.
"""
print('\nChecking if fluxes and Gibbs energies are compatible.\n')
flag = False
temperature = 298 # in K
gas_constant = 8.314 * 10**-3 # in kJ K^-1 mol^-1
stoic_df = data_dict['stoic']
flux_df = data_dict['measRates']
ma_df, dG_Q_df, dG_df = calculate_dG(data_dict, gas_constant, temperature)
if len(stoic_df.index) != len(flux_df.index):
flux_df = get_robust_fluxes(data_dict)
for rxn in flux_df.index:
if flux_df.loc[rxn, 'vref_mean (mmol/L/h)'] > 0 and dG_df.loc[rxn, '∆G_min'] > 0:
print(f'The flux and ∆G range seem to be incompatible for reaction {rxn}')
flag = True
if flux_df.loc[rxn, 'vref_mean (mmol/L/h)'] < 0 and dG_df.loc[rxn, '∆G_max'] < 0:
print(f'The flux and ∆G range seem to be incompatible for reaction {rxn}')
flag = True
if flag is False:
print('Everything seems to be OK.')
return flag, flux_df, dG_df
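# --- Editor's note: illustrative sketch, not part of the original file. ---
# The data_dict consumed by this module is assumed to hold the GRASP input sheets
# as pandas DataFrames keyed by sheet name; the keys and columns actually read are:
#   data_dict = {
#       'stoic':      ...,  # stoichiometric matrix, reactions as index
#       'thermoMets': ...,  # 'min (M)' / 'max (M)' concentration bounds
#       'thermoRxns': ...,  # "∆Gr'_min (kJ/mol)" / "∆Gr'_max (kJ/mol)"
#       'mets':       ...,  # 'balanced?' flags
#       'measRates':  ...,  # 'vref_mean (mmol/L/h)' / 'vref_std (mmol/L/h)'
#   }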
| 4,055
| 0
| 92
|
d194ea8db0f61bff0f8a955602ce8f2eb76abe18
| 8,432
|
py
|
Python
|
src/synthetic_data.py
|
tomogwen/fpdcluster
|
afbb16ce1e0e428304867084fb59d62ae3931b81
|
[
"MIT"
] | 10
|
2020-06-05T12:46:21.000Z
|
2021-04-19T10:46:46.000Z
|
src/synthetic_data.py
|
tomogwen/fpdcluster
|
afbb16ce1e0e428304867084fb59d62ae3931b81
|
[
"MIT"
] | null | null | null |
src/synthetic_data.py
|
tomogwen/fpdcluster
|
afbb16ce1e0e428304867084fb59d62ae3931b81
|
[
"MIT"
] | null | null | null |
import clustering
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import dionysus as dion
import random
if __name__ == '__main__':
seed = 0
dataset = gen_data2(seed, noise=0.1, n_samples=100)
diagrams = compute_diagrams(dataset)
diagrams_cluster = clustering.reformat_diagrams(diagrams)
r, M = clustering.pd_fuzzy(diagrams_cluster, 3, verbose=True, max_iter=20)
print("Membership values")
print(r)
plot_dataset(dataset)
plot_all_diagrams(diagrams)
plot_three_clusters(M)
# Other synthetic data, not used in the paper
# data = gen_data(seed, noise=0.3)
# plot_all(data, diagrams)
# plot_clusters(M)
# plot_everything(dataset, diagrams)
| 27.736842
| 115
| 0.607448
|
import clustering
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import dionysus as dion
import random
def plot_all(data, diagrams):
fig = plt.figure(figsize=(20, 10))
for i in range(len(data)):
num = 241 + i
ax = plt.subplot(num)
plt.scatter(data[i][:, 0], data[i][:, 1])
ax = plt.subplot(num + 4)
plot_diagram(diagrams[i], ax, lims=[0, 1.5, 0, 1.75])
fig.suptitle("Datasets with corresponding persistence diagrams")
plt.show()
def compute_diagrams(data):
diagrams = []
for i in range(len(data)):
print("Processing data: " + str(i))
filtration = dion.fill_rips(data[i], 2, 3.0)
homology = dion.homology_persistence(filtration)
diagram = dion.init_diagrams(homology, filtration)
diagrams.append(diagram[1])
print()
return diagrams
def plot_clusters(M):
plt.scatter(M[0].T[0], M[0].T[1], c='r', label='Rings')
plt.scatter(M[1].T[0], M[1].T[1], c='b', label='Noise')
plt.xlim([0, 1.5])
plt.ylim([0, 1.75])
plt.plot([0.1, 1.2], [0.1, 1.2])
plt.legend()
plt.title("Persistence Diagram Cluster Centres")
plt.show()
def gen_data(seed, noise=0.05, n_samples=100):
print("\nGenerating data...\n")
np.random.seed(seed)
random.seed(seed)
data = []
data.append(datasets.make_circles(n_samples=n_samples, factor=0.99, noise=noise, random_state=seed)[0])
data.append(datasets.make_circles(n_samples=n_samples, factor=0.99, noise=noise, random_state=seed + 1)[0])
data.append(np.random.normal(size=(100, 2), scale=0.5))
data.append(0.9 * np.random.normal(size=(100, 2), scale=0.5))
return data
def gen_data2(seed, noise, n_samples):
dataset = []
np.random.seed(seed)
random.seed(seed)
# Noise
data = np.random.normal(size=(100, 2), scale=0.5)
dataset.append(data)
data = np.random.normal(size=(100, 2), scale=0.5)
data[:, 0] = data[:, 0] * 0.5
dataset.append(data)
data = np.random.normal(size=(100, 2), scale=0.5)
data[:, 1] = data[:, 1] * 0.7
dataset.append(data)
# One Ring (to rule them all)
data = datasets.make_circles(n_samples=n_samples, factor=0.99, noise=noise, random_state=seed)[0]
dataset.append(data)
data = datasets.make_circles(n_samples=n_samples, factor=0.99, noise=noise, random_state=seed+1)[0]
data[:, 0] = data[:, 0] * 0.5
dataset.append(data)
data = datasets.make_circles(n_samples=n_samples, factor=0.99, noise=noise * 1.5, random_state=seed+2)[0]
dataset.append(data)
# Two Rings
data1 = datasets.make_circles(n_samples=int(0.5*n_samples), factor=0.99, noise=noise, random_state=seed + 3)[0]
data1[:, 1] -= 1
data2 = datasets.make_circles(n_samples=int(0.5*n_samples), factor=0.99, noise=noise, random_state=seed + 4)[0]
data2[:, 1] += 1
data = np.concatenate((0.5 * data1, 0.5 * data2), axis=0)
dataset.append(data)
data1 = datasets.make_circles(n_samples=int(0.5*n_samples), factor=0.99, noise=noise, random_state=seed + 5)[0]
data1[:, 1] -= 1
data2 = datasets.make_circles(n_samples=int(0.5*n_samples), factor=0.99, noise=noise, random_state=seed + 6)[0]
data2[:, 1] += 1
data = np.concatenate((0.5 * data1, 0.5 * data2), axis=0)
data = np.rot90(data).T
dataset.append(data)
data1 = datasets.make_circles(n_samples=int(0.5*n_samples), factor=0.99, noise=noise, random_state=seed+7)[0]
data1[:, 1] -= 1
data2 = datasets.make_circles(n_samples=int(0.5*n_samples), factor=0.99, noise=noise*2, random_state=seed+8)[0]
data2[:, 1] += 1
data = np.concatenate((0.5*data1, 0.5*data2), axis=0)
dataset.append(data)
return dataset
def plot_dataset(dataset):
fig = plt.figure(figsize=(10, 10))
lim = 1.45
for i in range(len(dataset)):
num = 331 + i
ax = plt.subplot(num)
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.set_xticks([])
ax.set_yticks([])
plt.scatter(dataset[i][:, 0], dataset[i][:, 1])
plt.show()
def plot_everything(dataset, diagrams):
fig = plt.figure(figsize=(20, 10))
lim = 1.45
for i in range(3):
num = i+1
ax = plt.subplot(3, 6, num)
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.set_xticks([])
ax.set_yticks([])
plt.scatter(dataset[i][:, 0], dataset[i][:, 1])
ax = plt.subplot(3, 6, num+3)
plot_diagram(diagrams[i], ax, lims=[0, 1.5, 0, 1.75])
for i in range(3):
num = 7+i
ax = plt.subplot(3, 6, num)
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.set_xticks([])
ax.set_yticks([])
        plt.scatter(dataset[i+3][:, 0], dataset[i+3][:, 1])
ax = plt.subplot(3, 6, num+3)
plot_diagram(diagrams[i+3], ax, lims=[0, 1.5, 0, 1.75])
for i in range(3):
num = 13+i
ax = plt.subplot(3, 6, num)
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.set_xticks([])
ax.set_yticks([])
plt.scatter(dataset[i+6][:, 0], dataset[i+6][:, 1])
ax = plt.subplot(3, 6, num+3)
plot_diagram(diagrams[i+6], ax, lims=[0, 1.5, 0, 1.75])
plt.show()
def plot_all_diagrams(diagrams):
fig = plt.figure(figsize=(10, 10))
for i in range(len(diagrams)):
num = 331 + i
ax = plt.subplot(num)
plot_diagram(diagrams[i], ax, lims=[0, 1.5, 0, 1.75])
# fig.suptitle("Datasets with corresponding persistence diagrams")
plt.show()
def plot_diagram(dgm, ax, show=False, labels=False, line_style=None, pt_style=None, lims=False):
# taken from Dionysus2 package
line_kwargs = {}
pt_kwargs = {}
if pt_style is not None:
pt_kwargs.update(pt_style)
if line_style is not None:
line_kwargs.update(line_style)
inf = float('inf')
if lims==False:
min_birth = min(p.birth for p in dgm if p.birth != inf)
max_birth = max(p.birth for p in dgm if p.birth != inf)
min_death = min(p.death for p in dgm if p.death != inf)
max_death = max(p.death for p in dgm if p.death != inf)
else:
min_birth = lims[0]
max_birth = lims[1]
min_death = lims[2]
max_death = lims[3]
ax.set_aspect('equal', 'datalim')
min_diag = min(min_birth, min_death)
max_diag = max(max_birth, max_death)
ax.scatter([p.birth for p in dgm], [p.death for p in dgm], **pt_kwargs)
ax.plot([min_diag, max_diag], [min_diag, max_diag], **line_kwargs)
# ax.set_xlabel('birth')
# ax.set_ylabel('death')
ax.set_xticks([])
ax.set_yticks([])
def plot_three_clusters(M):
fig = plt.figure(figsize=(3.33, 10))
lims = [0, 1.5, 0, 1.75]
min_birth = lims[0]
max_birth = lims[1]
min_death = lims[2]
max_death = lims[3]
# diagram 1
ax = plt.subplot(313)
ax.set_aspect('equal', 'datalim')
min_diag = min(min_birth, min_death)
max_diag = max(max_birth, max_death)
ax.scatter(M[0][:, 0], M[0][:, 1])
ax.plot([min_diag, max_diag], [min_diag, max_diag])
ax.set_xticks([])
ax.set_yticks([])
# diagram 2
ax = plt.subplot(311)
ax.set_aspect('equal', 'datalim')
min_diag = min(min_birth, min_death)
max_diag = max(max_birth, max_death)
ax.scatter(M[1][:, 0], M[1][:, 1])
ax.plot([min_diag, max_diag], [min_diag, max_diag])
ax.set_xticks([])
ax.set_yticks([])
# diagram 3
ax = plt.subplot(312)
ax.set_aspect('equal', 'datalim')
min_diag = min(min_birth, min_death)
max_diag = max(max_birth, max_death)
ax.scatter(M[2][:, 0], M[2][:, 1])
ax.plot([min_diag, max_diag], [min_diag, max_diag])
ax.set_xticks([])
ax.set_yticks([])
plt.show()
if __name__ == '__main__':
seed = 0
dataset = gen_data2(seed, noise=0.1, n_samples=100)
diagrams = compute_diagrams(dataset)
diagrams_cluster = clustering.reformat_diagrams(diagrams)
r, M = clustering.pd_fuzzy(diagrams_cluster, 3, verbose=True, max_iter=20)
print("Membership values")
print(r)
plot_dataset(dataset)
plot_all_diagrams(diagrams)
plot_three_clusters(M)
# Other synthetic data, not used in the paper
# data = gen_data(seed, noise=0.3)
# plot_all(data, diagrams)
# plot_clusters(M)
# plot_everything(dataset, diagrams)
| 7,457
| 0
| 230
|
2e69f73189a3759642774829299275d95d3f03ef
| 3,821
|
py
|
Python
|
controllers/pathfinder.py
|
endymecy/NDIToolbox
|
f7a0a642b4a778d9d0c131871f4bfb9822ecb3da
|
[
"BSD-4-Clause"
] | 5
|
2017-02-28T16:16:06.000Z
|
2020-07-13T06:49:34.000Z
|
controllers/pathfinder.py
|
endymecy/NDIToolbox
|
f7a0a642b4a778d9d0c131871f4bfb9822ecb3da
|
[
"BSD-4-Clause"
] | 1
|
2018-08-19T19:08:14.000Z
|
2018-08-19T19:08:14.000Z
|
controllers/pathfinder.py
|
endymecy/NDIToolbox
|
f7a0a642b4a778d9d0c131871f4bfb9822ecb3da
|
[
"BSD-4-Clause"
] | 4
|
2017-10-25T20:17:15.000Z
|
2021-07-26T11:39:50.000Z
|
"""pathfinder.py - specifies paths and common filenames"""
__author__ = 'Chris R. Coughlin'
from models import config
import os.path
import sys
def normalized(path_fn):
"""Decorator to normalize (os.path.normcase) paths"""
return normalize
@normalized
def app_path():
"""Returns the base application path."""
if hasattr(sys, 'frozen'):
# Handles PyInstaller
entry_point = sys.executable
else:
import controllers
entry_point = os.path.dirname(controllers.__file__)
return os.path.dirname(entry_point)
@normalized
def user_path():
"""Returns the path for storing user data. If not already set,
returns user's home directory/nditoolbox and sets the default in the
config file."""
_config = config.Configure(config_path())
upath_key = "User Path"
if _config.has_app_option(upath_key):
return _config.get_app_option(upath_key)
else:
default_upath = os.path.normcase(os.path.join(os.path.expanduser('~'), 'nditoolbox'))
_config.set_app_option({upath_key: default_upath})
return default_upath
@normalized
def docs_path():
"""Returns the path to the HTML documentation."""
return os.path.join(app_path(), 'docs')
@normalized
def resource_path():
"""Returns the path to resources - home folder
for icons, bitmaps, etc."""
return os.path.join(app_path(), 'resources')
@normalized
def icons_path():
"""Returns the path to application icons"""
return os.path.join(resource_path(), 'icons')
@normalized
def icon_path():
"""Returns the path to the application's default
PNG icon"""
return os.path.join(icons_path(), 'a7117_64.png')
@normalized
def winicon_path():
"""Returns the path to the application's default
.ICO icon"""
return os.path.join(icons_path(), 'a7117_64.ico')
@normalized
def bitmap_path():
"""Returns the path to application bitmaps"""
return os.path.join(resource_path(), 'bitmaps')
@normalized
def textfiles_path():
"""Returns the path to application textfiles"""
return os.path.join(resource_path(), 'textfiles')
@normalized
def data_path():
"""Returns the path to data files"""
return os.path.join(user_path(), 'data')
@normalized
def thumbnails_path():
"""Returns the path to data thumbnails"""
return os.path.join(user_path(), 'thumbnails')
@normalized
def plugins_path():
"""Returns the path to plugins"""
return os.path.join(user_path(), 'plugins')
@normalized
def config_path():
"""Returns the path to the configuration file"""
return os.path.expanduser("~/nditoolbox.cfg")
@normalized
def log_path():
"""Returns the path to the log file. If not already set,
sets to user's home directory/nditoolbox.log and sets the default in the config file."""
_config = config.Configure(config_path())
logpath_key = "Log File"
if _config.has_app_option(logpath_key):
return _config.get_app_option(logpath_key)
else:
default_logpath = os.path.normcase(os.path.join(os.path.expanduser('~'), 'nditoolbox.log'))
_config.set_app_option({logpath_key: default_logpath})
return default_logpath
@normalized
def podmodels_path():
"""Returns the path to POD Toolkit models"""
return os.path.join(user_path(), "podmodels")
@normalized
def gates_path():
"""Returns the path to ultrasonic gates"""
return os.path.join(user_path(), "gates")
@normalized
def colormaps_path():
"""Returns the path to user-defined colormaps"""
return os.path.join(user_path(), "colormaps")
@normalized
def batchoutput_path():
"""Returns the path to data files produced with batch processing mode"""
return os.path.join(data_path(), "batch_output")
| 25.993197
| 99
| 0.690395
|
"""pathfinder.py - specifies paths and common filenames"""
__author__ = 'Chris R. Coughlin'
from models import config
import os.path
import sys
def normalized(path_fn):
"""Decorator to normalize (os.path.normcase) paths"""
def normalize():
return os.path.normcase(path_fn())
return normalize
@normalized
def app_path():
"""Returns the base application path."""
if hasattr(sys, 'frozen'):
# Handles PyInstaller
entry_point = sys.executable
else:
import controllers
entry_point = os.path.dirname(controllers.__file__)
return os.path.dirname(entry_point)
@normalized
def user_path():
"""Returns the path for storing user data. If not already set,
returns user's home directory/nditoolbox and sets the default in the
config file."""
_config = config.Configure(config_path())
upath_key = "User Path"
if _config.has_app_option(upath_key):
return _config.get_app_option(upath_key)
else:
default_upath = os.path.normcase(os.path.join(os.path.expanduser('~'), 'nditoolbox'))
_config.set_app_option({upath_key: default_upath})
return default_upath
@normalized
def docs_path():
"""Returns the path to the HTML documentation."""
return os.path.join(app_path(), 'docs')
@normalized
def resource_path():
"""Returns the path to resources - home folder
for icons, bitmaps, etc."""
return os.path.join(app_path(), 'resources')
@normalized
def icons_path():
"""Returns the path to application icons"""
return os.path.join(resource_path(), 'icons')
@normalized
def icon_path():
"""Returns the path to the application's default
PNG icon"""
return os.path.join(icons_path(), 'a7117_64.png')
@normalized
def winicon_path():
"""Returns the path to the application's default
.ICO icon"""
return os.path.join(icons_path(), 'a7117_64.ico')
@normalized
def bitmap_path():
"""Returns the path to application bitmaps"""
return os.path.join(resource_path(), 'bitmaps')
@normalized
def textfiles_path():
"""Returns the path to application textfiles"""
return os.path.join(resource_path(), 'textfiles')
@normalized
def data_path():
"""Returns the path to data files"""
return os.path.join(user_path(), 'data')
@normalized
def thumbnails_path():
"""Returns the path to data thumbnails"""
return os.path.join(user_path(), 'thumbnails')
@normalized
def plugins_path():
"""Returns the path to plugins"""
return os.path.join(user_path(), 'plugins')
@normalized
def config_path():
"""Returns the path to the configuration file"""
return os.path.expanduser("~/nditoolbox.cfg")
@normalized
def log_path():
"""Returns the path to the log file. If not already set,
sets to user's home directory/nditoolbox.log and sets the default in the config file."""
_config = config.Configure(config_path())
logpath_key = "Log File"
if _config.has_app_option(logpath_key):
return _config.get_app_option(logpath_key)
else:
default_logpath = os.path.normcase(os.path.join(os.path.expanduser('~'), 'nditoolbox.log'))
_config.set_app_option({logpath_key: default_logpath})
return default_logpath
@normalized
def podmodels_path():
"""Returns the path to POD Toolkit models"""
return os.path.join(user_path(), "podmodels")
@normalized
def gates_path():
"""Returns the path to ultrasonic gates"""
return os.path.join(user_path(), "gates")
@normalized
def colormaps_path():
"""Returns the path to user-defined colormaps"""
return os.path.join(user_path(), "colormaps")
@normalized
def batchoutput_path():
"""Returns the path to data files produced with batch processing mode"""
return os.path.join(data_path(), "batch_output")
| 38
| 0
| 27
|
1572db60c45a7a3e80068f389efcd4692bc26899
| 338
|
py
|
Python
|
djapi/api/constants.py
|
dgouldin/djangocon-eu-2015
|
890057a3451231f96d15c65011d867dedfd5f9fa
|
[
"MIT"
] | null | null | null |
djapi/api/constants.py
|
dgouldin/djangocon-eu-2015
|
890057a3451231f96d15c65011d867dedfd5f9fa
|
[
"MIT"
] | null | null | null |
djapi/api/constants.py
|
dgouldin/djangocon-eu-2015
|
890057a3451231f96d15c65011d867dedfd5f9fa
|
[
"MIT"
] | null | null | null |
TRANSACTION_STATUS_PENDING = 'pending'
TRANSACTION_STATUS_COMPLETE = 'complete'
TRANSACTION_STATUS_REFUNDED = 'refunded'
TRANSACTION_STATUSES = (
(TRANSACTION_STATUS_PENDING, TRANSACTION_STATUS_PENDING),
(TRANSACTION_STATUS_COMPLETE, TRANSACTION_STATUS_COMPLETE),
(TRANSACTION_STATUS_REFUNDED, TRANSACTION_STATUS_REFUNDED),
)
| 37.555556
| 63
| 0.837278
|
TRANSACTION_STATUS_PENDING = 'pending'
TRANSACTION_STATUS_COMPLETE = 'complete'
TRANSACTION_STATUS_REFUNDED = 'refunded'
TRANSACTION_STATUSES = (
(TRANSACTION_STATUS_PENDING, TRANSACTION_STATUS_PENDING),
(TRANSACTION_STATUS_COMPLETE, TRANSACTION_STATUS_COMPLETE),
(TRANSACTION_STATUS_REFUNDED, TRANSACTION_STATUS_REFUNDED),
)
| 0
| 0
| 0
|
e81e348e3912f9783df345478bd8ba60a40bfcc1
| 2,040
|
py
|
Python
|
test/test_minmax.py
|
shoaibmahmod7/Turbomachinery-Rotors-Balancing
|
8bb4c1ec97c4646bcd69ed3398aafc7f985bc96d
|
[
"MIT"
] | 1
|
2022-02-03T17:14:16.000Z
|
2022-02-03T17:14:16.000Z
|
test/test_minmax.py
|
shoaibmahmod7/Turbomachinery-Rotors-Balancing
|
8bb4c1ec97c4646bcd69ed3398aafc7f985bc96d
|
[
"MIT"
] | null | null | null |
test/test_minmax.py
|
shoaibmahmod7/Turbomachinery-Rotors-Balancing
|
8bb4c1ec97c4646bcd69ed3398aafc7f985bc96d
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys
import yaml
import pytest
import test_tools
import hsbalance as hs
'''This module is for testing the Min_Max model solver.'''
# Reading the test cases from the config.yaml file; to add more tests, follow the rules in that file.
tests, tests_id, timeout = test_tools.get_tests_from_yaml('Min_max')
@pytest.mark.parametrize('param, expected',
tests,
ids=tests_id
)
@pytest.mark.timeout(timeout)
def test_Min_max(param, expected):
'''
    Instantiate the Min_Max model and test it against the test cases.
'''
my_ALPHA = hs.Alpha()
A = hs.convert_matrix_to_cart(param[0]['A'])
weight_const = param[0]['weight_const']
A0 = [0]
# It is acceptable to enter either direct_matrix or A,B,U matrices
try:
direct_matrix = hs.convert_matrix_to_cart(param[0]['ALPHA'])
my_ALPHA.add(direct_matrix=direct_matrix)
except KeyError:
B = hs.convert_matrix_to_cart(param[0]['B'])
U = hs.convert_matrix_to_cart(param[0]['U'])
my_ALPHA.add(A=A, B=B, U=U)
try:
A0 = hs.convert_matrix_to_cart(param[0]['A0'])
except KeyError:
pass
expected_W = hs.convert_matrix_to_cart(expected)
my_model = hs.Min_max(A, my_ALPHA,
weight_const=weight_const,name='Min_max') # Setting the model almost with no constraints
W = my_model.solve()
print((expected))
print('Residual Vibration rmse calculated = ', my_model.rmse())
print('Residual Vibration rmse from test_case = ',
hs.rmse(hs.residual_vibration(my_ALPHA.value, expected_W, A)))
print('expected_residual_vibration',
hs.convert_matrix_to_math(my_model.expected_residual_vibration()))
print('Correction weights', hs.convert_cart_math(W))
# Constraint Minmax algorithm was slightly inefficient in CVXPY
# The rmse was marginally more than the author solution
np.testing.assert_allclose(W, expected_W, rtol=0.09) # allowance 9% error
| 37.090909
| 118
| 0.673039
|
import numpy as np
import sys
import yaml
import pytest
import test_tools
import hsbalance as hs
'''This module is for testing the Min_Max model solver.'''
# Reading the test cases from the config.yaml file; to add more tests, follow the rules in that file.
tests, tests_id, timeout = test_tools.get_tests_from_yaml('Min_max')
@pytest.mark.parametrize('param, expected',
tests,
ids=tests_id
)
@pytest.mark.timeout(timeout)
def test_Min_max(param, expected):
'''
    Instantiate the Min_Max model and test it against the test cases.
'''
my_ALPHA = hs.Alpha()
A = hs.convert_matrix_to_cart(param[0]['A'])
weight_const = param[0]['weight_const']
A0 = [0]
# It is acceptable to enter either direct_matrix or A,B,U matrices
try:
direct_matrix = hs.convert_matrix_to_cart(param[0]['ALPHA'])
my_ALPHA.add(direct_matrix=direct_matrix)
except KeyError:
B = hs.convert_matrix_to_cart(param[0]['B'])
U = hs.convert_matrix_to_cart(param[0]['U'])
my_ALPHA.add(A=A, B=B, U=U)
try:
A0 = hs.convert_matrix_to_cart(param[0]['A0'])
except KeyError:
pass
expected_W = hs.convert_matrix_to_cart(expected)
my_model = hs.Min_max(A, my_ALPHA,
weight_const=weight_const,name='Min_max') # Setting the model almost with no constraints
W = my_model.solve()
print((expected))
print('Residual Vibration rmse calculated = ', my_model.rmse())
print('Residual Vibration rmse from test_case = ',
hs.rmse(hs.residual_vibration(my_ALPHA.value, expected_W, A)))
print('expected_residual_vibration',
hs.convert_matrix_to_math(my_model.expected_residual_vibration()))
print('Correction weights', hs.convert_cart_math(W))
# Constraint Minmax algorithm was slightly inefficient in CVXPY
# The rmse was marginally more than the author solution
np.testing.assert_allclose(W, expected_W, rtol=0.09) # allowance 9% error
| 0
| 0
| 0
|
fc71471e251cfc681a314a5d4c50edf2f39c8b52
| 8,471
|
py
|
Python
|
pymatex/search/IndexSearchVisitor.py
|
Gawaboumga/PyMatex
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
[
"MIT"
] | 1
|
2019-03-05T09:45:04.000Z
|
2019-03-05T09:45:04.000Z
|
pymatex/search/IndexSearchVisitor.py
|
Gawaboumga/PyMatex
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
[
"MIT"
] | null | null | null |
pymatex/search/IndexSearchVisitor.py
|
Gawaboumga/PyMatex
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
[
"MIT"
] | null | null | null |
from pymatex.listener import MatexASTVisitor
from pymatex.node import *
| 35.894068
| 98
| 0.67678
|
from pymatex.listener import MatexASTVisitor
from pymatex.node import *
class IndexSearchVisitor(MatexASTVisitor.MatexASTVisitor):
def __init__(self, data: dict):
self.data = data
self.results = {}
self.seen_constants = {}
self.seen_variables = {}
self.bound_variables = set()
def get_results(self):
return self.results
def visit_addition(self, addition_node: Addition):
depth_lhs = addition_node.lhs.accept(self)
depth_rhs = addition_node.rhs.accept(self)
node_depth = max(depth_lhs, depth_rhs) + 1
self.search(node_depth, NodeType.ADDITION)
return node_depth
def visit_constant(self, constant_node: Constant):
node_depth = 0
self.search_constant(node_depth, NodeType.CONSTANT, constant_node.value)
return node_depth
def visit_division(self, division_node: Division):
depth_lhs = division_node.lhs.accept(self)
depth_rhs = division_node.rhs.accept(self)
node_depth = max(depth_lhs, depth_rhs) + 1
self.search(node_depth, NodeType.DIVISION)
return node_depth
def visit_exponentiation(self, exponentiation_node: Exponentiation):
depth_expr = exponentiation_node.lhs.accept(self)
depth_exponent = exponentiation_node.rhs.accept(self)
node_depth = max(depth_expr, depth_exponent) + 1
self.search(node_depth, NodeType.EXPONENTIATION)
return node_depth
def visit_fraction(self, fraction_node: Fraction):
if fraction_node.variable:
fraction_node.variable.accept(self)
fraction_node.start_range.accept(self)
if fraction_node.end_range:
fraction_node.end_range.accept(self)
self.add_bound_variable(fraction_node.variable)
depth_expression = fraction_node.expression.accept(self)
self.remove_bound_variable(fraction_node.variable)
node_depth = depth_expression + 1
self.search(node_depth, NodeType.FRACTION)
return node_depth
def visit_function(self, function_node: Function):
first_argument = function_node.argument(0)
depth = first_argument.accept(self)
for i in range(1, function_node.number_of_arguments()):
depth = min(depth, function_node.argument(i).accept(self))
node_depth = depth + 1
self.search(node_depth, NodeType.FUNCTION)
return node_depth
def visit_indexed_variable(self, indexed_variable_node: IndexedVariable):
depth = indexed_variable_node.index.accept(self)
node_depth = depth + 1
self.search_variable(node_depth, NodeType.INDEXEDVARIABLE, indexed_variable_node.variable)
return node_depth
def visit_integral(self, integral_node: Integral):
integral_node.variable.accept(self)
integral_node.start_range.accept(self)
integral_node.end_range.accept(self)
self.add_bound_variable(integral_node.variable)
depth_expression = integral_node.expression.accept(self)
self.remove_bound_variable(integral_node.variable)
node_depth = depth_expression + 1
self.search(node_depth, NodeType.SUMMATION)
return node_depth
def visit_multiplication(self, multiplication_node: Multiplication):
depth_lhs = multiplication_node.lhs.accept(self)
depth_rhs = multiplication_node.rhs.accept(self)
node_depth = max(depth_lhs, depth_rhs) + 1
self.search(node_depth, NodeType.MULTIPLICATION)
return node_depth
def visit_negate(self, negate_node: Negate):
depth = negate_node.node.accept(self)
self.search(depth + 1, NodeType.NEGATE)
return depth
def visit_set(self, set_node: Set):
depth_lhs = set_node.lhs.accept(self)
depth_rhs = set_node.rhs.accept(self)
node_depth = max(depth_lhs, depth_rhs) + 1
self.search(node_depth, NodeType.SET)
return node_depth
def visit_set_difference(self, set_difference: SetDifference):
depth_lhs = set_difference.lhs.accept(self)
depth_rhs = set_difference.rhs.accept(self)
node_depth = max(depth_lhs, depth_rhs) + 1
self.search(node_depth, NodeType.SET_DIFFERENCE)
return node_depth
def visit_product(self, product_node: Product):
if product_node.variable:
product_node.variable.accept(self)
product_node.start_range.accept(self)
if product_node.end_range:
product_node.end_range.accept(self)
self.add_bound_variable(product_node.variable)
depth_expression = product_node.expression.accept(self)
self.remove_bound_variable(product_node.variable)
node_depth = depth_expression + 1
self.search(node_depth, NodeType.PRODUCT)
return node_depth
def visit_subtraction(self, subtraction_node: Subtraction):
depth_lhs = subtraction_node.lhs.accept(self)
depth_rhs = subtraction_node.rhs.accept(self)
node_depth = max(depth_lhs, depth_rhs) + 1
self.search(node_depth, NodeType.SUBTRACTION)
return node_depth
def visit_summation(self, summation_node: Summation):
if summation_node.variable:
summation_node.variable.accept(self)
summation_node.start_range.accept(self)
if summation_node.end_range:
summation_node.end_range.accept(self)
self.add_bound_variable(summation_node.variable)
depth_expression = summation_node.expression.accept(self)
self.remove_bound_variable(summation_node.variable)
node_depth = depth_expression + 1
self.search(node_depth, NodeType.SUMMATION)
return node_depth
def visit_variable(self, variable_node: Variable):
node_depth = 0
if str(variable_node.variable) in self.bound_variables:
self.search_bound_variable(node_depth, NodeType.BOUNDVARIABLE, variable_node.variable)
else:
self.search_free_variable(node_depth, NodeType.VARIABLE, variable_node.variable)
return node_depth
def search(self, node_depth: int, node_type: NodeType):
nodes = self.data.get(node_depth, None)
if nodes is None:
return
objects = nodes.get(node_type, None)
if objects:
self.__add(objects, 100)
def search_constant(self, node_depth: int, node_type: NodeType, external_data: str):
nodes = self.data.get(node_depth, None)
if nodes is None:
return
objects = nodes.get(node_type, None)
if objects is None:
return
associated = objects.get(external_data, None)
if associated:
self.__add(associated, 100)
else:
for mathematical_objects in objects.values():
self.__add(mathematical_objects, 70)
def search_bound_variable(self, node_depth: int, node_type: NodeType, external_data: str):
nodes = self.data.get(node_depth, None)
if nodes is None:
return
objects = nodes.get(node_type, None)
if objects is None:
return
associated = objects.get(external_data, None)
if associated:
self.__add(associated, 100)
for mathematical_objects in objects.values():
self.__add(mathematical_objects, 70)
free_variables = nodes.get(node_type, None)
        if free_variables is None:
return
for mathematical_objects in free_variables.values():
self.__add(mathematical_objects, 30)
def search_free_variable(self, node_depth: int, node_type: NodeType, external_data: str):
nodes = self.data.get(node_depth, None)
if nodes is None:
return
objects = nodes.get(node_type, None)
if objects is None:
return
associated = objects.get(external_data, None)
if associated:
self.__add(associated, 100)
for mathematical_objects in objects.values():
self.__add(mathematical_objects, 70)
def __add(self, items, value):
for item in items:
new_value = self.results.get(item, 0) + value
self.results[item] = new_value
def add_bound_variable(self, variable: Variable):
self.bound_variables.add(str(variable))
def remove_bound_variable(self, variable: Variable):
self.bound_variables.remove(str(variable))
| 7,663
| 37
| 698
|
8c6e39a2577187b6a1f33f6467e3fe9575d74f83
| 1,157
|
py
|
Python
|
server/DbHourlyTask.py
|
chuckablack/quokka-prime
|
6429c09039f37887f4b0d5a196f1df2712136de7
|
[
"MIT"
] | 5
|
2021-05-29T20:15:16.000Z
|
2021-11-01T18:35:55.000Z
|
server/DbHourlyTask.py
|
chuckablack/quokka-prime
|
6429c09039f37887f4b0d5a196f1df2712136de7
|
[
"MIT"
] | null | null | null |
server/DbHourlyTask.py
|
chuckablack/quokka-prime
|
6429c09039f37887f4b0d5a196f1df2712136de7
|
[
"MIT"
] | 4
|
2021-05-18T06:50:34.000Z
|
2021-09-23T10:23:09.000Z
|
from datetime import datetime, timedelta
import time
from db_apis import trim_tables, create_summaries
| 30.447368
| 74
| 0.638721
|
from datetime import datetime, timedelta
import time
from db_apis import trim_tables, create_summaries
class DbHourlyTask:
def __init__(self):
self.terminate = False
self.current_hour = str(datetime.now())[:13]
print("---> starting background db hourly task")
def set_terminate(self):
if not self.terminate:
self.terminate = True
print("\n\n...gracefully exiting db_hourly_task_thread")
def start(self):
while True and not self.terminate:
this_hour = str(datetime.now())[:13]
if this_hour == self.current_hour:
time.sleep(15)
continue
# trim old data from status and diagnostic tables
status_expire_after = datetime.now() - timedelta(hours=24)
diagnostics_expire_after = datetime.now() - timedelta(hours=2)
trim_tables(status_expire_after, diagnostics_expire_after)
# create hourly summaries from status tables
create_summaries(self.current_hour)
self.current_hour = this_hour
print("db_hourly_task_thread exit complete")
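# --- Illustrative usage sketch (not part of the original file) ---
# The surrounding server is assumed to run this task on a daemon thread and to
# call set_terminate() at shutdown, roughly like:
#
#   import threading
#   hourly_task = DbHourlyTask()
#   threading.Thread(target=hourly_task.start, daemon=True).start()
#   ...
#   hourly_task.set_terminate()  # request a graceful exit of the polling loop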
| 951
| -2
| 104
|
ecc51e60591a3f817b2262a421173a5caf35191e
| 591
|
py
|
Python
|
src/sequence/quick_sort.py
|
JadielTeofilo/General-Algorithms
|
dfcf86c6ecd727573079f8971187c47bdb7a37bb
|
[
"MIT"
] | null | null | null |
src/sequence/quick_sort.py
|
JadielTeofilo/General-Algorithms
|
dfcf86c6ecd727573079f8971187c47bdb7a37bb
|
[
"MIT"
] | null | null | null |
src/sequence/quick_sort.py
|
JadielTeofilo/General-Algorithms
|
dfcf86c6ecd727573079f8971187c47bdb7a37bb
|
[
"MIT"
] | null | null | null |
##################### QuickSort ###################
from typing import List
def quick_sort(nums: List[int]) -> List[int]:
""" Does recursive sorting using quick sort """
if len(nums) < 2:
return nums
mid: int = (len(nums) - 1)//2
smaller_values: List[int] = [num for i, num in enumerate(nums)
if num <= nums[mid] and i != mid]
bigger_values: List[int] = [num for num in nums
if num > nums[mid]]
return quick_sort(smaller_values) + [nums[mid]] + quick_sort(bigger_values)
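# Illustrative usage sketch (added, not in the original file): the middle element
# is used as the pivot and the two partitions are sorted recursively.
#
#   >>> quick_sort([5, 3, 8, 1, 9, 2])
#   [1, 2, 3, 5, 8, 9]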
| 36.9375
| 79
| 0.509306
|
##################### QuickSort ###################
from typing import List
def quick_sort(nums: List[int]) -> List[int]:
""" Does recursive sorting using quick sort """
if len(nums) < 2:
return nums
mid: int = (len(nums) - 1)//2
smaller_values: List[int] = [num for i, num in enumerate(nums)
if num <= nums[mid] and i != mid]
bigger_values: List[int] = [num for num in nums
if num > nums[mid]]
return quick_sort(smaller_values) + [nums[mid]] + quick_sort(bigger_values)
| 0
| 0
| 0
|
65f87122c687e34d582f25f7881fa6227314080c
| 192
|
py
|
Python
|
Lesson 2 - Neural Networks/softmax.py
|
Yasir323/PyTorch-Course
|
18fdc866738b4f3dd9022cfe62863697c594b54c
|
[
"MIT"
] | null | null | null |
Lesson 2 - Neural Networks/softmax.py
|
Yasir323/PyTorch-Course
|
18fdc866738b4f3dd9022cfe62863697c594b54c
|
[
"MIT"
] | null | null | null |
Lesson 2 - Neural Networks/softmax.py
|
Yasir323/PyTorch-Course
|
18fdc866738b4f3dd9022cfe62863697c594b54c
|
[
"MIT"
] | null | null | null |
import numpy as np
| 21.333333
| 38
| 0.604167
|
import numpy as np
def softmax(arr):
expL = np.exp(arr) # Broadcasting
sumExpL = sum(expL)
result = []
for i in expL:
result.append(i * 1.0/sumExpL)
return result
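# Illustrative usage sketch (added, not in the original file): each score is
# exponentiated and normalised by the sum, so the outputs are positive and sum to 1.
#
#   >>> softmax(np.array([2.0, 1.0, 0.1]))
#   [0.659..., 0.242..., 0.098...]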
| 151
| 0
| 23
|
421dca101f5521f89f68b7427dd0c0fbbe13d896
| 590
|
py
|
Python
|
tests/test_end_to_end.py
|
MetroStar/bitnest
|
a8d9cef5a17a5366e088a774ae951a0f06f97ae7
|
[
"MIT"
] | 4
|
2021-09-16T21:33:13.000Z
|
2022-01-18T22:05:57.000Z
|
tests/test_end_to_end.py
|
MetroStar/bitnest
|
a8d9cef5a17a5366e088a774ae951a0f06f97ae7
|
[
"MIT"
] | 1
|
2021-12-02T03:47:45.000Z
|
2021-12-02T03:47:45.000Z
|
tests/test_end_to_end.py
|
MetroStar/bitnest
|
a8d9cef5a17a5366e088a774ae951a0f06f97ae7
|
[
"MIT"
] | null | null | null |
import pytest
from models.test import StructA
from models.simple import MILSTD_1553_Message
from models.chapter10 import MILSTD_1553_Data_Packet_Format_1
@pytest.mark.parametrize(
"struct", [StructA, MILSTD_1553_Message, MILSTD_1553_Data_Packet_Format_1]
)
| 28.095238
| 78
| 0.733898
|
import pytest
from models.test import StructA
from models.simple import MILSTD_1553_Message
from models.chapter10 import MILSTD_1553_Data_Packet_Format_1
@pytest.mark.parametrize(
"struct", [StructA, MILSTD_1553_Message, MILSTD_1553_Data_Packet_Format_1]
)
def test_realize_paths(struct):
expression = struct.expression()
source = (
expression.transform("realize_datatypes")
.transform("realize_conditions")
.transform("realize_offsets")
.transform("parser_datatype")
.transform("arithmetic_simplify")
.backend("python")
)
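    # Note: the test only checks that the full transform pipeline and the Python
    # backend run without raising; the generated `source` is not asserted against.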
| 304
| 0
| 22
|
67c0fe84b4a431512636cf8da382dec2c62878d2
| 280
|
py
|
Python
|
actions/servers_list.py
|
nzlosh/stackstorm-powerdns
|
f554376af25dbdfa6c0df5e376a7a02287cee1cf
|
[
"Apache-2.0"
] | null | null | null |
actions/servers_list.py
|
nzlosh/stackstorm-powerdns
|
f554376af25dbdfa6c0df5e376a7a02287cee1cf
|
[
"Apache-2.0"
] | null | null | null |
actions/servers_list.py
|
nzlosh/stackstorm-powerdns
|
f554376af25dbdfa6c0df5e376a7a02287cee1cf
|
[
"Apache-2.0"
] | null | null | null |
from lib.base import PowerDNSClientAction
class ServerListAction(PowerDNSClientAction):
"""
List available PowerDNS servers.
"""
| 23.333333
| 53
| 0.703571
|
from lib.base import PowerDNSClientAction
class ServerListAction(PowerDNSClientAction):
"""
List available PowerDNS servers.
"""
def run(self, response_timeout=5):
        super(ServerListAction, self).run(response_timeout)
return (True, self.servers_list())
| 110
| 0
| 27
|
6ed442ecc79bec374b4c5e3179ec155f952e0f3e
| 14,122
|
py
|
Python
|
tests/test_results.py
|
SuadeLabs/rattr
|
22b82d31d4cebf0a7107fa1fb496a070b2e1f4ad
|
[
"MIT"
] | 6
|
2021-11-10T11:13:37.000Z
|
2022-01-19T16:15:17.000Z
|
tests/test_results.py
|
SuadeLabs/ratter
|
22b82d31d4cebf0a7107fa1fb496a070b2e1f4ad
|
[
"MIT"
] | 13
|
2021-11-10T11:39:12.000Z
|
2022-03-01T10:27:49.000Z
|
tests/test_results.py
|
SuadeLabs/rattr
|
22b82d31d4cebf0a7107fa1fb496a070b2e1f4ad
|
[
"MIT"
] | null | null | null |
from unittest import mock
from rattr.analyser.context import Call, Func, Import, Name
from rattr.analyser.context.symbol import Class
from rattr.analyser.results import generate_results_from_ir
| 29.117526
| 82
| 0.361139
|
from unittest import mock
from rattr.analyser.context import Call, Func, Import, Name
from rattr.analyser.context.symbol import Class
from rattr.analyser.results import generate_results_from_ir
class TestResults:
def test_generate_results_from_ir_no_calls(self):
# No calls
fn_ir = {
"sets": {
Name("arg.attr", "arg"),
},
"gets": {
Name("arg.another_attr", "arg"),
},
"dels": set(),
"calls": set(),
}
file_ir = {Func("fn", ["arg"], None, None): fn_ir}
expected = {
"fn": {
"sets": {"arg.attr"},
"gets": {"arg.another_attr"},
"dels": set(),
"calls": set(),
}
}
assert generate_results_from_ir(file_ir, dict()) == expected
def test_generate_results_from_ir_simple(self):
# Calls
fn_a = Func("fn_a", ["arg"], None, None)
fn_b = Func("fn_b", ["arg_b"], None, None)
fn_a_ir = {
"sets": {
Name("arg.attr", "arg"),
},
"gets": {
Name("arg.another_attr", "arg"),
},
"dels": set(),
"calls": {
Call("fn_b()", ["arg"], {}, target=fn_b),
},
}
fn_b_ir = {
"sets": {
Name("arg_b.set_in_fn_b", "arg_b"),
},
"gets": {
Name("arg_b.get_in_fn_b", "arg_b"),
},
"dels": set(),
"calls": set(),
}
file_ir = {
fn_a: fn_a_ir,
fn_b: fn_b_ir,
}
expected = {
"fn_a": {
"sets": {"arg.attr", "arg.set_in_fn_b"},
"gets": {"arg.another_attr", "arg.get_in_fn_b"},
"dels": set(),
"calls": {"fn_b()"},
},
"fn_b": {
"sets": {"arg_b.set_in_fn_b"},
"gets": {"arg_b.get_in_fn_b"},
"dels": set(),
"calls": set(),
},
}
assert generate_results_from_ir(file_ir, dict()) == expected
def test_generate_results_from_ir_direct_recursion(self):
# Direct recursion
fn_ir = {
"sets": {
Name("arg.attr", "arg"),
},
"gets": {
Name("arg.another_attr", "arg"),
},
"dels": set(),
"calls": {Call("fn()", ["arg"], {})},
}
file_ir = {Func("fn", ["arg"], None, None): fn_ir}
expected = {
"fn": {
"sets": {"arg.attr"},
"gets": {"arg.another_attr"},
"dels": set(),
"calls": {"fn()"},
}
}
assert generate_results_from_ir(file_ir, dict()) == expected
def test_generate_results_from_ir_indirect_recursion(self):
# Indirect recursion
fn_a = Func("fn_a", ["arg_a"], None, None)
fn_b = Func("fn_b", ["arg_b"], None, None)
fn_a_ir = {
"sets": {Name("arg_a.get_from_a", "arg_a")},
"gets": set(),
"dels": set(),
"calls": {Call("fn_b()", ["arg_a"], {}, target=fn_b)},
}
fn_b_ir = {
"sets": {Name("arg_b.get_from_b", "arg_b")},
"gets": set(),
"dels": set(),
"calls": {Call("fn_a()", ["arg_b"], {}, target=fn_a)},
}
file_ir = {
fn_a: fn_a_ir,
fn_b: fn_b_ir,
}
expected = {
"fn_a": {
"sets": {"arg_a.get_from_a", "arg_a.get_from_b"},
"gets": set(),
"dels": set(),
"calls": {"fn_b()"},
},
"fn_b": {
"sets": {"arg_b.get_from_a", "arg_b.get_from_b"},
"gets": set(),
"dels": set(),
"calls": {"fn_a()"},
},
}
assert generate_results_from_ir(file_ir, dict()) == expected
def test_generate_results_from_ir_child_has_direct_recursion(self):
fn_a = Func("fn_a", ["x"], None, None)
fn_b = Func("fn_b", ["arg"], None, None)
fn_a_ir = {
"sets": set(),
"gets": {Name("x.attr", "x")},
"dels": set(),
"calls": {Call("fn_b()", ["x"], {}, target=fn_b)},
}
fn_b_ir = {
"sets": set(),
"gets": {Name("arg.field", "arg")},
"dels": set(),
"calls": {Call("fn_b()", ["arg"], {}, target=fn_b)},
}
file_ir = {
fn_a: fn_a_ir,
fn_b: fn_b_ir,
}
expected = {
"fn_a": {
"sets": set(),
"gets": {"x.attr", "x.field"},
"dels": set(),
"calls": {"fn_b()"},
},
"fn_b": {
"sets": set(),
"gets": {"arg.field"},
"dels": set(),
"calls": {"fn_b()"},
},
}
assert generate_results_from_ir(file_ir, dict()) == expected
def test_generate_results_from_ir_child_has_indirect_recursion(self):
fn_a = Func("fn_a", ["a"], None, None)
fn_b = Func("fn_b", ["b"], None, None)
fn_c = Func("fn_c", ["c"], None, None)
fn_a_ir = {
"sets": set(),
"gets": {Name("a.in_a", "a")},
"dels": set(),
"calls": {Call("fn_b()", ["a"], {}, target=fn_b)},
}
fn_b_ir = {
"sets": set(),
"gets": {Name("b.in_b", "b")},
"dels": set(),
"calls": {Call("fn_c()", ["b"], {}, target=fn_c)},
}
fn_c_ir = {
"sets": set(),
"gets": {Name("c.in_c", "c")},
"dels": set(),
"calls": {Call("fn_b()", [], {"b": "c"}, target=fn_b)},
}
file_ir = {
fn_a: fn_a_ir,
fn_b: fn_b_ir,
fn_c: fn_c_ir,
}
expected = {
"fn_a": {
"sets": set(),
"gets": {"a.in_a", "a.in_b", "a.in_c"},
"dels": set(),
"calls": {"fn_b()"},
},
"fn_b": {
"sets": set(),
"gets": {"b.in_b", "b.in_c"},
"dels": set(),
"calls": {"fn_c()"},
},
"fn_c": {
"sets": set(),
"gets": {"c.in_b", "c.in_c"},
"dels": set(),
"calls": {"fn_b()"},
},
}
assert generate_results_from_ir(file_ir, dict()) == expected
def test_generate_results_from_ir_repeated_calls(self):
# Repeated calls that should be ignored
fn_a = Func("fn_a", ["a"], None, None)
fn_b = Func("fn_b", ["b"], None, None)
fn_a_ir = {
"sets": set(),
"gets": {Name("a.in_a", "a")},
"dels": set(),
"calls": {
Call("fn_b()", ["a.attr"], {}, target=fn_b),
Call("fn_b()", ["a.attr"], {}, target=fn_b),
},
}
fn_b_ir = {
"sets": set(),
"gets": {Name("b.in_b", "b")},
"dels": set(),
"calls": set(),
}
file_ir = {
fn_a: fn_a_ir,
fn_b: fn_b_ir,
}
expected = {
"fn_a": {
"sets": set(),
"gets": {"a.in_a", "a.attr.in_b"},
"dels": set(),
"calls": {"fn_b()"},
},
"fn_b": {
"sets": set(),
"gets": {"b.in_b"},
"dels": set(),
"calls": set(),
},
}
assert generate_results_from_ir(file_ir, dict()) == expected
# Repeated calls that should not be ignored
fn_a = Func("fn_a", ["a"], None, None)
fn_b = Func("fn_b", ["b"], None, None)
fn_a_ir = {
"sets": set(),
"gets": {Name("a.in_a", "a")},
"dels": set(),
"calls": {
Call("fn_b()", ["a.attr_one"], {}, target=fn_b),
Call("fn_b()", ["a.attr_two"], {}, target=fn_b),
},
}
fn_b_ir = {
"sets": set(),
"gets": {Name("b.in_b", "b")},
"dels": set(),
"calls": set(),
}
file_ir = {
fn_a: fn_a_ir,
fn_b: fn_b_ir,
}
expected = {
"fn_a": {
"sets": set(),
"gets": {"a.in_a", "a.attr_one.in_b", "a.attr_two.in_b"},
"dels": set(),
"calls": {"fn_b()"},
},
"fn_b": {
"sets": set(),
"gets": {"b.in_b"},
"dels": set(),
"calls": set(),
},
}
assert generate_results_from_ir(file_ir, dict()) == expected
def test_imports_ir(self, file_ir_from_dict):
# Simple
imports_ir = {
"module": file_ir_from_dict(
{
Func("act", ["arg"], None, None): {
"sets": set(),
"gets": {
Name("arg.attr", "arg"),
},
"dels": set(),
"calls": set(),
}
}
)
}
_i = Import("act", "module.act")
_i.module_name = "module"
_i.module_spec = mock.Mock()
fn = Func("fn", ["ms"], None, None)
fn_ir = {
"sets": set(),
"gets": set(),
"dels": set(),
"calls": {Call("act()", ["ms"], {}, target=_i)},
}
file_ir = {
fn: fn_ir,
}
expected = {
"fn": {
"sets": set(),
"gets": {"ms.attr"},
"dels": set(),
"calls": {"act()"},
}
}
assert generate_results_from_ir(file_ir, imports_ir) == expected
# Chained
_i_second = Import("second", "chained.second")
_i_second.module_name = "chained"
_i_second.module_spec = mock.Mock()
imports_ir = {
"module": file_ir_from_dict(
{
Func("first", ["arrg"], None, None): {
"sets": set(),
"gets": set(),
"dels": set(),
"calls": {Call("second", ["arrg"], {}, target=_i_second)},
}
}
),
"chained": file_ir_from_dict(
{
Func("second", ["blarg"], None, None): {
"sets": set(),
"gets": {
Name("blarg._attr", "blarg"),
},
"dels": set(),
"calls": set(),
}
}
),
}
_i_module = Import("first", "module.first")
_i_module.module_name = "module"
_i_module.module_spec = mock.Mock()
fn = Func("fn", ["flarg"], None, None)
fn_ir = {
"sets": set(),
"gets": set(),
"dels": set(),
"calls": {Call("first()", ["flarg"], {}, target=_i_module)},
}
file_ir = {
fn: fn_ir,
}
expected = {
"fn": {
"sets": set(),
"gets": {"flarg._attr"},
"dels": set(),
"calls": {
"first()",
},
}
}
assert generate_results_from_ir(file_ir, imports_ir) == expected
def test_class(self):
cls_inst = Class("SomeClass", ["self", "arg"], None, None)
cls_inst_ir = {
"sets": {
Name("self.my_attr", "self"),
},
"gets": {
Name("arg.attr_in_init", "arg"),
},
"dels": set(),
"calls": set(),
}
cls_inst_sm = Func("SomeClass.static", ["flarg"], None, None)
cls_inst_sm_ir = {
"sets": {
Name("flarg.attr_in_static", "flarg"),
},
"gets": set(),
"dels": set(),
"calls": set(),
}
fn = Func("i_call_them", ["marg"], None, None)
fn_ir = {
"sets": {
Name("instance"),
},
"gets": set(),
"dels": set(),
"calls": {
Call("SomeClass()", ["instance", "marg"], {}, target=cls_inst),
Call("SomeClass.static()", ["marg"], {}, target=cls_inst_sm),
},
}
file_ir = {
cls_inst: cls_inst_ir,
cls_inst_sm: cls_inst_sm_ir,
fn: fn_ir,
}
expected = {
"SomeClass": {
"sets": {"self.my_attr"},
"gets": {"arg.attr_in_init"},
"dels": set(),
"calls": set(),
},
"SomeClass.static": {
"sets": {"flarg.attr_in_static"},
"gets": set(),
"dels": set(),
"calls": set(),
},
"i_call_them": {
"sets": {
"instance",
"instance.my_attr",
"marg.attr_in_static",
},
"gets": {
"marg.attr_in_init",
},
"dels": set(),
"calls": {
"SomeClass()",
"SomeClass.static()",
},
},
}
assert generate_results_from_ir(file_ir, dict()) == expected
| 13,664
| -3
| 265
|
1ee8b3b441e3e386e5383b05e72b69bbd71f1a7d
| 15,586
|
py
|
Python
|
noname.py
|
sxnxhxrxkx/nonamechan
|
4830ee9852b790ae46d66bc1ed356b3f1b0a8404
|
[
"MIT"
] | 1
|
2018-11-04T14:19:14.000Z
|
2018-11-04T14:19:14.000Z
|
noname.py
|
sxnxhxrxkx/nonamechan
|
4830ee9852b790ae46d66bc1ed356b3f1b0a8404
|
[
"MIT"
] | null | null | null |
noname.py
|
sxnxhxrxkx/nonamechan
|
4830ee9852b790ae46d66bc1ed356b3f1b0a8404
|
[
"MIT"
] | null | null | null |
# Work with Python 3.6
import discord
import numpy as np
import pandas as pd
import random
import subprocess
import weather as wt
from nlu_yahoo import nluservice
from MorseCode import morse
# from wc import noname_wc
#import softalk as sf
from calender import getCalender, getCalLink, getCommandList
from news import getNews
from matchbattle.map import getTargetMap
import noname_vocabulary as nnm
import traceback
from logger import writelog
from logger import nonamelog
from bs4 import BeautifulSoup
import requests
import configparser
config = configparser.ConfigParser()
config.read('noname.ini')
TOKEN = config['noname']['TOKEN']
client = discord.Client()
@client.event
@client.event
client.run(TOKEN)
| 40.801047
| 171
| 0.588926
|
# Work with Python 3.6
import discord
import numpy as np
import pandas as pd
import random
import subprocess
import weather as wt
from nlu_yahoo import nluservice
from MorseCode import morse
# from wc import noname_wc
#import softalk as sf
from calender import getCalender, getCalLink, getCommandList
from news import getNews
from matchbattle.map import getTargetMap
import noname_vocabulary as nnm
import traceback
from logger import writelog
from logger import nonamelog
from bs4 import BeautifulSoup
import requests
import configparser
config = configparser.ConfigParser()
config.read('noname.ini')
TOKEN = config['noname']['TOKEN']
client = discord.Client()
def getUser(message):
usrname = str(message.author)
return usrname
@client.event
async def on_message(message):
msg = ''
if message.author == client.user:
return
try:
if message.content.__contains__('さしすせそ'):
msg = nnm.sasisuseso(msg)
nonamelog(getUser(message),'sasisuseso', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('世界一かわいい'):
msg = nnm.okoku(msg)
nonamelog(getUser(message),'okoku', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('ほめて') or message.content.__contains__('褒めて') :
msg += nnm.homete(msg)
nonamelog(getUser(message),'homete', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('ばとうして') or message.content.__contains__('罵倒して') or message.content.__contains__('おしっこ') or message.content.__contains__('!ちんちん'):
msg += nnm.batou(msg)
nonamelog(getUser(message),'batou', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('疲れた') or message.content.__contains__('つかれた'):
msg += nnm.tsukareta(msg)
nonamelog(getUser(message),'tsukareta', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('許して') or message.content.__contains__('ゆるして') or message.content.__contains__('許されたい'):
msg += nnm.yurusite(msg)
nonamelog(getUser(message),'yurusite', message.content)
await message.channel.send( msg)
if message.content.__contains__('頑張った') or message.content.__contains__('がんばった'):
msg += nnm.ganbatta(msg)
nonamelog(getUser(message),'ganbatta', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('応援'):
msg += nnm.ouen(msg)
nonamelog(getUser(message),'ouen', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('励まして') or message.content.__contains__('はげまして'):
msg += nnm.hagemasu(msg)
nonamelog(getUser(message),'hagemasu', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('頑張る') or message.content.__contains__('がんばる') or message.content.__contains__('がんがる') or message.content.__contains__('ガンガル'):
msg += nnm.ganbaru(msg)
nonamelog(getUser(message),'ganbaru', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('はじめまして'):
msg += 'はじめまして! {0.author.mention}'.format(message) + 'さん!'
msg += nnm.information(msg)
msg += '楽しんでいってくださいね!'
nonamelog(getUser(message),'information', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('!help') or message.content.__contains__('!ヘルプ'):
msg += nnm.nonamehelp(msg)
nonamelog(getUser(message),'nonamehelp', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('hello'):
msg += 'Hello {0.author.mention}'.format(message)
nonamelog(getUser(message),'hello', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('こんにちわ'):
msg += 'こんにちわ! {0.author.mention}'.format(message) + 'さん!'
nonamelog(getUser(message),'hello', message.content)
await message.channel.send( msg)
return
if message.content.startswith('おはよ'):
msg += nnm.ohayo(msg)
nonamelog(getUser(message),'ohayo', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('おやす'):
msg += nnm.oyasumi(msg)
nonamelog(getUser(message),'oyasumi', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('!ありがと'):
msg += nnm.arigato(msg)
nonamelog(getUser(message),'arigato', message.content)
await message.channel.send( msg)
return
if message.content.__contains__('おつ') or message.content.__contains__('お疲れ') or message.content.__contains__('!おつ'):
msg += nnm.otsu(msg)
nonamelog(getUser(message),'otsu', message.content)
await message.channel.send( msg)
if message.content.__contains__('!ぬるぽ') or message.content.__contains__('ぬるぽ'):
msg += nnm.nurupo(msg)
nonamelog(getUser(message),'nurupo', message.content)
await message.channel.send( msg)
if message.content.__contains__('!しりとり'):
msg += 'しりとり、ですか?現在、その機能は使われておりません。ピピー!'
await message.channel.send( msg)
if message.content.__contains__('!占い') or message.content.__contains__('!運勢'):
msg = 'じゃじゃーーん!!今日の運勢!ですね!'
await message.channel.send( msg)
msg, negaposi = nnm.uranai(msg)
await message.channel.send( msg)
msg = nnm.luckynum(msg, negaposi)
await message.channel.send( msg)
msg = nnm.luckycolor(msg, negaposi)
await message.channel.send( msg)
msg = nnm.advice(msg, negaposi)
nonamelog(getUser(message),'uranai', message.content)
await message.channel.send( msg)
if message.content.__contains__('!ニンジャ') or message.content.__contains__('ニンジャ'):
msg = nnm.ninja(msg,getUser(message))
nonamelog(getUser(message),'ninja', message.content)
await message.channel.send( msg)
if message.content.startswith('ふぃんだー') or message.content.startswith('フィンダー'):
msg = nnm.finder(msg,getUser(message))
nonamelog(getUser(message),'finder', message.content)
await message.channel.send( msg)
if message.content.startswith('!dice') or message.content.startswith('!サイコロ') or message.content.startswith('!ダイス'):
if message.content.startswith('!dicegame') or message.content.startswith('!サイコロ勝負') or message.content.startswith('!ダイス勝負'):
msg = nnm.dicegame(msg, message.content)
nonamelog(getUser(message),'dicegame', message.content)
else:
msg = nnm.somedice(msg, message.content)
nonamelog(getUser(message),'somedice', message.content)
await message.channel.send( msg)
if message.content.startswith('!ちんちろ') or message.content.startswith('!チンチロ'):
msg = "ちんちろりんだね!役が出るまで3回サイコロを振るよ!"
await message.channel.send( msg)
cnt = 1
msg = "1投目を振るよ!"
await message.channel.send( msg)
msg, score, yaku, result_str, reaction = nnm.tintiro(msg, message.content)
await message.channel.send( msg)
if yaku == "目なし":
msg = "2投目を振るよ!"
await message.channel.send( msg)
msg, score, yaku, result_str, reaction = nnm.tintiro(msg, message.content)
await message.channel.send( msg)
cnt += 1
if yaku == "目なし":
msg = "これが最後のチャンスだよ!"
await message.channel.send( msg)
msg, score, yaku, result_str, reaction = nnm.tintiro(msg, message.content)
await message.channel.send( msg)
cnt += 1
if yaku == "目なし":
msg = '最後まで目なし…あまくないね!あなたの負けだよ!'
await message.channel.send( msg)
elif score >= 1:
msg = '役を出すことができて何よりだよ!'
await message.channel.send( msg)
# msg += 'はじめまして! {0.author.mention}'.format(message) + 'さん!'
msg = '結果をまとめるね!'
await message.channel.send( msg)
            # Header
embed = discord.Embed(title="---ちんちろりん結果---", description='プレイヤ:{0.author.mention}'.format(message))
embed.add_field(name="投数", value=str(cnt) + '投')
embed.add_field(name="役", value=yaku)
embed.add_field(name="スコア", value=str(score))
await message.channel.send(embed=embed)
msg = 'また遊んでね!'
await message.channel.send( msg)
nonamelog(getUser(message),'tintiro', message.content)
return
if message.content.startswith('!マップ') or message.content.startswith('!map'):
msg = "対戦マップを取得するね!"
await message.channel.send( msg)
map_name, map_path = getTargetMap()
await message.channel.send(map_path)
            # Header
embed = discord.Embed(title=map_name)#, description=map_name)
#embed.set_image(url=map_path)
# embed.add_field(name="マップ", value=str(map_name))
# embed.add_field(name="役", value=yaku)
# embed.add_field(name="スコア", value=str(score))
await message.channel.send(embed=embed)
msg = 'さぁ!楽しみだね!'
await message.channel.send( msg)
nonamelog(getUser(message),'map', message.content)
return
if message.content.startswith('!天気'):
#msg += wt.weather()
msg += wt.weather_geo(msg, message.content)
nonamelog(getUser(message),'weather', message.content)
await message.channel.send( msg)
if message.content.startswith('!モールス') or message.content.startswith('!もーるす'):
msg += morse(msg, message.content)
nonamelog(getUser(message),'morse', message.content)
await message.channel.send( msg)
if message.content.startswith('!nlu'):
msg += nluservice(msg, message.content)
nonamelog(getUser(message),'nluservice', message.content)
await message.channel.send( msg)
if message.content.startswith('!dec2bin'):
msg += nnm.dec2bin(msg, message.content)
nonamelog(getUser(message),'dec2bin', message.content)
await message.channel.send( msg)
if message.content.startswith('!dec2hex'):
msg += nnm.dec2hex(msg, message.content)
nonamelog(getUser(message),'dec2hex', message.content)
await message.channel.send( msg)
if message.content.startswith('!bin2dec'):
msg += nnm.bin2dec(msg, message.content)
nonamelog(getUser(message),'bin2dec', message.content)
await message.channel.send( msg)
if message.content.startswith('!hex2dec'):
msg += nnm.hex2dec(msg, message.content)
nonamelog(getUser(message),'hex2dec', message.content)
await message.channel.send( msg)
if message.content.startswith('!半濁音'):
msg += nnm.handakuon(msg, message.content)
nonamelog(getUser(message),'handakuon', message.content)
await message.channel.send( msg)
if message.content.startswith('!濁音'):
msg += nnm.dakuon(msg, message.content)
nonamelog(getUser(message),'dakuon', message.content)
await message.channel.send( msg)
if message.content.startswith('!リピート'):
msg += nnm.repeat(msg, message.content)
nonamelog(getUser(message),'repeat', message.content)
await message.channel.send( msg)
        # Get the link to the FFBE War of the Visions (幻影戦争) event calendar
if message.content.startswith('!カレンダリンク'):
nonamelog(getUser(message),'cal', message.content)
msg = "幻影戦争のイベントカレンダーは以下のリンクだよ!"
await message.channel.send( msg)
msg = getCalLink(msg)
await message.channel.send( msg)
msg = "編集したい場合はここを見てね。参考になったかな?"
await message.channel.send( msg)
return
        # Get the FFBE War of the Visions (幻影戦争) event calendar
if message.content.startswith('!カレンダ'):
nonamelog(getUser(message),'cal', message.content)
msg, df = getCalender( msg, message.content)
await message.channel.send( msg)
msg = "--------------------------------------"
await message.channel.send( msg)
for index, row in df.iterrows():
msg = row['start'].strftime('%m/%d %H') + "' - " + row['end'].strftime('%m/%d %H') + "'" + " " + row['category'] + " " + row['event']
await message.channel.send( msg)
msg = "--------------------------------------"
await message.channel.send( msg)
msg = getCommandList(msg)
await message.channel.send( msg)
return
        # Get FFBE War of the Visions (幻影戦争) news and announcements
if message.content.startswith('!お知らせ'):
nonamelog(getUser(message),'news', message.content)
url_org = "https://players.wotvffbe.com"
url = url_org + "/all/"
msg = "幻影戦争のお知らせを取得するよ!"
await message.channel.send( msg)
msg, df = getNews(msg, message.content)
await message.channel.send( msg)
            # Header
embed = discord.Embed(title="FFBE 幻影戦争 お知らせ", description=f"FFBE 幻影戦争 のお知らせのURLは [こちら]({url}) です!")
for index, row in df.iterrows():
time = row['time']
content = row['content']
link = row['link']
embed.add_field(name=time, value="[" + content +"](" + link + ")",inline=False)
await message.channel.send(embed=embed)
return
# if message.content.startswith('!wc'):
# msg += noname_wc(msg, message.content)
# nonamelog(getUser(message),'wc', message.content)
# await client.send_file(message.channel, "temp.png", content="スクレイピングしたよ!", filename="send.png")
# add -------
if message.content.__contains__('のなめ') or message.content.__contains__('noname'):
msg = nnm.noname(msg)
nonamelog(getUser(message),'noname', message.content)
await message.channel.send( msg)
if message.content.startswith('exit'):
await client.logout()
except:
msg += 'エラー。んん、、なんかおかしいかも。。logを出すね。。'
nonamelog(getUser(message),'error', message.content)
await message.channel.send( msg)
msg = traceback.format_exc()
await message.channel.send( msg)
#sf.talk(msg)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
client.run(TOKEN)
| 16,119
| 0
| 67
|
e033047e653a5d284cd1d9c31bd1fb747d758d2a
| 5,522
|
py
|
Python
|
tensorflow_federated/python/core/backends/iree/compiler.py
|
truthiswill/federated
|
d25eeac036dfc2a485120a195fd904223cfc823a
|
[
"Apache-2.0"
] | 1
|
2022-02-08T01:11:14.000Z
|
2022-02-08T01:11:14.000Z
|
tensorflow_federated/python/core/backends/iree/compiler.py
|
truthiswill/federated
|
d25eeac036dfc2a485120a195fd904223cfc823a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/core/backends/iree/compiler.py
|
truthiswill/federated
|
d25eeac036dfc2a485120a195fd904223cfc823a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A collection of utilities for compiling TFF code for execution on IREE."""
import tempfile
import iree.compiler.tf
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.core.backends.iree import computation_module
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
def import_tensorflow_computation(comp, name='fn'):
"""Creates a `computation_module.ComputationModule` from a TF computation.
WARNING: This helper function is under construction, and most capabilities are
not implemented at this stage:
* The parameter and result of `comp` can only be a single tensor. Named
tuples, sequences, or functional types are not currently supported.
* Only tensorflow code can be imported.
TODO(b/153499219): Add support for named tuples, sequences, and functions.
Args:
comp: An instance of a `pb.Computation` with TensorFlow code to import.
name: An optional `str` name of the (single) function in the IREE module.
Returns:
An instance of `Module` with the imported function present.
Raises:
TypeError: If arguments are of the wrong types, e.g., in `comp` is not a
TensorFlow computation.
"""
py_typecheck.check_type(comp, pb.Computation)
type_spec = type_serialization.deserialize_type(comp.type)
if not type_spec.is_function():
type_spec = computation_types.FunctionType(None, type_spec)
# TODO(b/153499219): Replace this with a recursive check of the signature
# after relaxing the type restrictions and introducing nested structures.
py_typecheck.check_type(type_spec.result, computation_types.TensorType)
if type_spec.parameter is not None:
py_typecheck.check_type(type_spec.parameter, computation_types.TensorType)
which_computation = comp.WhichOneof('computation')
if which_computation != 'tensorflow':
raise TypeError('Expected a TensorFlow computation, found {}.'.format(
which_computation))
output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
comp.tensorflow.result)
if type_spec.parameter is not None:
input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
comp.tensorflow.parameter)
else:
input_tensor_names = []
graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def)
init_op = comp.tensorflow.initialize_op
return_elements = input_tensor_names + output_tensor_names
if init_op:
graph_def = tensorflow_utils.add_control_deps_for_init_op(
graph_def, init_op)
return_elements.append(init_op)
with tf.Graph().as_default() as graph:
# TODO(b/153499219): See if we can reintroduce uniquify_shared_names().
# Right now, it causes loader breakage, and unclear if still necessary.
import_results = tf.graph_util.import_graph_def(
graph_def, input_map={}, return_elements=return_elements, name='')
if init_op:
initializer = import_results[-1]
import_results.pop()
else:
initializer = None
inputs = import_results[0:len(input_tensor_names)]
outputs = import_results[len(input_tensor_names):]
with graph.as_default():
# TODO(b/153499219): Find a way to reflect the nested parameter and result
# structure here after relaxing the restrictions.
if inputs:
assert len(inputs) < 2
input_dict = {
'parameter':
tf.compat.v1.saved_model.utils.build_tensor_info(inputs[0])
}
else:
input_dict = {}
assert len(outputs) == 1
output_dict = {
'result': tf.compat.v1.saved_model.utils.build_tensor_info(outputs[0])
}
sig_def = tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs=input_dict, outputs=output_dict, method_name=name)
with tempfile.TemporaryDirectory() as model_dir:
builder = tf.compat.v1.saved_model.Builder(model_dir)
with tf.compat.v1.Session(graph=graph) as sess:
builder.add_meta_graph_and_variables(
sess, ['unused'],
signature_def_map={name: sig_def},
legacy_init_op=initializer,
strip_default_attrs=True)
builder.save()
iree_module = iree.compiler.tf.compile_saved_model(
model_dir,
import_type='SIGNATURE_DEF',
import_only=True,
saved_model_tags=set(['unused']),
exported_names=[name])
return computation_module.ComputationModule(iree_module, name, type_spec)
| 39.726619
| 80
| 0.74828
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A collection of utilities for compiling TFF code for execution on IREE."""
import tempfile
import iree.compiler.tf
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.core.backends.iree import computation_module
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
def import_tensorflow_computation(comp, name='fn'):
"""Creates a `computation_module.ComputationModule` from a TF computation.
WARNING: This helper function is under construction, and most capabilities are
not implemented at this stage:
* The parameter and result of `comp` can only be a single tensor. Named
tuples, sequences, or functional types are not currently supported.
* Only tensorflow code can be imported.
TODO(b/153499219): Add support for named tuples, sequences, and functions.
Args:
comp: An instance of a `pb.Computation` with TensorFlow code to import.
name: An optional `str` name of the (single) function in the IREE module.
Returns:
An instance of `Module` with the imported function present.
Raises:
TypeError: If arguments are of the wrong types, e.g., in `comp` is not a
TensorFlow computation.
"""
py_typecheck.check_type(comp, pb.Computation)
type_spec = type_serialization.deserialize_type(comp.type)
if not type_spec.is_function():
type_spec = computation_types.FunctionType(None, type_spec)
# TODO(b/153499219): Replace this with a recursive check of the signature
# after relaxing the type restrictions and introducing nested structures.
py_typecheck.check_type(type_spec.result, computation_types.TensorType)
if type_spec.parameter is not None:
py_typecheck.check_type(type_spec.parameter, computation_types.TensorType)
which_computation = comp.WhichOneof('computation')
if which_computation != 'tensorflow':
raise TypeError('Expected a TensorFlow computation, found {}.'.format(
which_computation))
output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
comp.tensorflow.result)
if type_spec.parameter is not None:
input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
comp.tensorflow.parameter)
else:
input_tensor_names = []
graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def)
init_op = comp.tensorflow.initialize_op
return_elements = input_tensor_names + output_tensor_names
if init_op:
graph_def = tensorflow_utils.add_control_deps_for_init_op(
graph_def, init_op)
return_elements.append(init_op)
with tf.Graph().as_default() as graph:
# TODO(b/153499219): See if we can reintroduce uniquify_shared_names().
# Right now, it causes loader breakage, and unclear if still necessary.
import_results = tf.graph_util.import_graph_def(
graph_def, input_map={}, return_elements=return_elements, name='')
if init_op:
initializer = import_results[-1]
import_results.pop()
else:
initializer = None
inputs = import_results[0:len(input_tensor_names)]
outputs = import_results[len(input_tensor_names):]
with graph.as_default():
# TODO(b/153499219): Find a way to reflect the nested parameter and result
# structure here after relaxing the restrictions.
if inputs:
assert len(inputs) < 2
input_dict = {
'parameter':
tf.compat.v1.saved_model.utils.build_tensor_info(inputs[0])
}
else:
input_dict = {}
assert len(outputs) == 1
output_dict = {
'result': tf.compat.v1.saved_model.utils.build_tensor_info(outputs[0])
}
sig_def = tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs=input_dict, outputs=output_dict, method_name=name)
with tempfile.TemporaryDirectory() as model_dir:
builder = tf.compat.v1.saved_model.Builder(model_dir)
with tf.compat.v1.Session(graph=graph) as sess:
builder.add_meta_graph_and_variables(
sess, ['unused'],
signature_def_map={name: sig_def},
legacy_init_op=initializer,
strip_default_attrs=True)
builder.save()
iree_module = iree.compiler.tf.compile_saved_model(
model_dir,
import_type='SIGNATURE_DEF',
import_only=True,
saved_model_tags=set(['unused']),
exported_names=[name])
return computation_module.ComputationModule(iree_module, name, type_spec)
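# --- Illustrative usage sketch (hypothetical, not part of the original file) ---
# `tf_comp` is assumed to be a `pb.Computation` whose `tensorflow` field holds a
# serialized TensorFlow graph, e.g. taken from a TFF computation's proto form:
#
#   module = import_tensorflow_computation(tf_comp, name='fn')
#   # `module` wraps the imported IREE module together with its TFF type signature.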
| 0
| 0
| 0
|
39571e94baa89811230fb0af0126de9bc9159675
| 911
|
py
|
Python
|
legacy/apps/smokegen_blob.py
|
tailintalent/PDE-Control
|
7031909188e7ce217da2b1628236011d1dff161a
|
[
"MIT"
] | 22
|
2020-04-27T12:48:32.000Z
|
2022-03-23T10:41:48.000Z
|
legacy/apps/smokegen_blob.py
|
tailintalent/PDE-Control
|
7031909188e7ce217da2b1628236011d1dff161a
|
[
"MIT"
] | 5
|
2020-12-18T14:19:23.000Z
|
2022-01-22T18:29:27.000Z
|
legacy/apps/smokegen_blob.py
|
tailintalent/PDE-Control
|
7031909188e7ce217da2b1628236011d1dff161a
|
[
"MIT"
] | 3
|
2021-05-29T23:30:53.000Z
|
2022-02-14T06:30:32.000Z
|
from phi.fluidformat import *
# for scene in scenes("~/data/control/squares"):
# scene.remove()
scenecount = 1000
for scene_index in range(scenecount):
scene = new_scene("~/data/control/squares")
start_x, start_y, end_x, end_y = np.random.randint(10, 110, 4)
print(scene)
scenelength = 32
vx = (end_x-start_x) / float(scenelength)
vy = (end_y-start_y) / float(scenelength)
for frame in range(scenelength+1):
time = frame / float(scenelength)
array = np.zeros([128, 128, 1], np.float32)
x = int(round(start_x * (1-time) + end_x * time))
y = int(round(start_y * (1-time) + end_y * time))
array[y:y+8, x:x+8, :] = 1
velocity_array = np.empty([129, 129, 2], np.float32)
velocity_array[...,0] = vx
velocity_array[...,1] = vy
write_sim_frame(scene.path, [array, velocity_array], ["Density", "Velocity"], frame)
| 37.958333
| 92
| 0.614709
|
from phi.fluidformat import *
# for scene in scenes("~/data/control/squares"):
# scene.remove()
scenecount = 1000
for scene_index in range(scenecount):
scene = new_scene("~/data/control/squares")
start_x, start_y, end_x, end_y = np.random.randint(10, 110, 4)
print(scene)
scenelength = 32
vx = (end_x-start_x) / float(scenelength)
vy = (end_y-start_y) / float(scenelength)
for frame in range(scenelength+1):
time = frame / float(scenelength)
array = np.zeros([128, 128, 1], np.float32)
x = int(round(start_x * (1-time) + end_x * time))
y = int(round(start_y * (1-time) + end_y * time))
array[y:y+8, x:x+8, :] = 1
velocity_array = np.empty([129, 129, 2], np.float32)
velocity_array[...,0] = vx
velocity_array[...,1] = vy
write_sim_frame(scene.path, [array, velocity_array], ["Density", "Velocity"], frame)
| 0
| 0
| 0
|
59c7971bdd0fbc42647fd2f3df68d614494ae493
| 12,561
|
py
|
Python
|
mango/marketmaking/modelstatebuilderfactory.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
mango/marketmaking/modelstatebuilderfactory.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
mango/marketmaking/modelstatebuilderfactory.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import enum
import mango
import typing
from solana.publickey import PublicKey
from ..constants import SYSTEM_PROGRAM_ADDRESS
from ..modelstate import ModelState
from .modelstatebuilder import (
ModelStateBuilder,
WebsocketModelStateBuilder,
SerumPollingModelStateBuilder,
SpotPollingModelStateBuilder,
PerpPollingModelStateBuilder,
)
# # 🥭 ModelStateBuilder class
#
# Base class for building a `ModelState` through polling or websockets.
#
| 33.857143
| 124
| 0.670727
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import enum
import mango
import typing
from solana.publickey import PublicKey
from ..constants import SYSTEM_PROGRAM_ADDRESS
from ..modelstate import ModelState
from .modelstatebuilder import (
ModelStateBuilder,
WebsocketModelStateBuilder,
SerumPollingModelStateBuilder,
SpotPollingModelStateBuilder,
PerpPollingModelStateBuilder,
)
class ModelUpdateMode(enum.Enum):
# We use strings here so that argparse can work with these as parameters.
WEBSOCKET = "WEBSOCKET"
POLL = "POLL"
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"{self}"
# # 🥭 ModelStateBuilder class
#
# Base class for building a `ModelState` through polling or websockets.
#
def model_state_builder_factory(
mode: ModelUpdateMode,
context: mango.Context,
disposer: mango.Disposable,
websocket_manager: mango.WebSocketSubscriptionManager,
health_check: mango.HealthCheck,
wallet: mango.Wallet,
group: mango.Group,
account: mango.Account,
market: mango.LoadedMarket,
oracle: mango.Oracle,
) -> ModelStateBuilder:
if mode == ModelUpdateMode.WEBSOCKET:
return _websocket_model_state_builder_factory(
context,
disposer,
websocket_manager,
health_check,
wallet,
group,
account,
market,
oracle,
)
else:
return _polling_model_state_builder_factory(
context, wallet, group, account, market, oracle
)
def _polling_model_state_builder_factory(
context: mango.Context,
wallet: mango.Wallet,
group: mango.Group,
account: mango.Account,
market: mango.Market,
oracle: mango.Oracle,
) -> ModelStateBuilder:
if mango.SerumMarket.isa(market):
return _polling_serum_model_state_builder_factory(
context, wallet, group, account, mango.SerumMarket.ensure(market), oracle
)
elif mango.SpotMarket.isa(market):
return _polling_spot_model_state_builder_factory(
group, account, mango.SpotMarket.ensure(market), oracle
)
elif mango.PerpMarket.isa(market):
return _polling_perp_model_state_builder_factory(
group, account, mango.PerpMarket.ensure(market), oracle
)
else:
raise Exception(
f"Could not determine type of market {market.fully_qualified_symbol}: {market}"
)
def _polling_serum_model_state_builder_factory(
context: mango.Context,
wallet: mango.Wallet,
group: mango.Group,
account: mango.Account,
market: mango.SerumMarket,
oracle: mango.Oracle,
) -> ModelStateBuilder:
base_account = mango.TokenAccount.fetch_largest_for_owner_and_token(
context, wallet.address, market.base
)
if base_account is None:
raise Exception(
f"Could not find token account owned by {wallet.address} for base token {market.base}."
)
quote_account = mango.TokenAccount.fetch_largest_for_owner_and_token(
context, wallet.address, market.quote
)
if quote_account is None:
raise Exception(
f"Could not find token account owned by {wallet.address} for quote token {market.quote}."
)
all_open_orders = mango.OpenOrders.load_for_market_and_owner(
context,
market.address,
wallet.address,
context.serum_program_address,
market.base,
market.quote,
)
if len(all_open_orders) == 0:
raise Exception(
f"Could not find serum openorders account owned by {wallet.address} for market {market.fully_qualified_symbol}."
)
return SerumPollingModelStateBuilder(
all_open_orders[0].address,
market,
oracle,
group.address,
group.cache,
account.address,
all_open_orders[0].address,
base_account,
quote_account,
)
def _polling_spot_model_state_builder_factory(
group: mango.Group,
account: mango.Account,
market: mango.SpotMarket,
oracle: mango.Oracle,
) -> ModelStateBuilder:
market_index: int = group.slot_by_spot_market_address(market.address).index
open_orders_address: typing.Optional[PublicKey] = account.spot_open_orders_by_index[
market_index
]
all_open_orders_addresses: typing.Sequence[PublicKey] = account.spot_open_orders
if open_orders_address is None:
raise Exception(
f"Could not find spot openorders in account {account.address} for market {market.fully_qualified_symbol}."
)
return SpotPollingModelStateBuilder(
open_orders_address,
market,
oracle,
group.address,
group.cache,
account.address,
open_orders_address,
all_open_orders_addresses,
)
def _polling_perp_model_state_builder_factory(
group: mango.Group,
account: mango.Account,
market: mango.PerpMarket,
oracle: mango.Oracle,
) -> ModelStateBuilder:
all_open_orders_addresses: typing.Sequence[PublicKey] = account.spot_open_orders
return PerpPollingModelStateBuilder(
account.address,
market,
oracle,
group.address,
group.cache,
all_open_orders_addresses,
)
def __load_all_openorders_watchers(
context: mango.Context,
wallet: mango.Wallet,
account: mango.Account,
group: mango.Group,
websocket_manager: mango.WebSocketSubscriptionManager,
health_check: mango.HealthCheck,
) -> typing.Sequence[mango.Watcher[mango.OpenOrders]]:
all_open_orders_watchers: typing.List[mango.Watcher[mango.OpenOrders]] = []
for basket_token in account.base_slots:
if basket_token.spot_open_orders is not None:
spot_market_symbol: str = f"spot:{basket_token.base_instrument.symbol}/{account.shared_quote_token.symbol}"
spot_market = mango.SpotMarket.ensure(
mango.market(context, spot_market_symbol)
)
oo_watcher = mango.build_spot_open_orders_watcher(
context,
websocket_manager,
health_check,
wallet,
account,
group,
spot_market,
)
all_open_orders_watchers += [oo_watcher]
return all_open_orders_watchers
def _websocket_model_state_builder_factory(
context: mango.Context,
disposer: mango.Disposable,
websocket_manager: mango.WebSocketSubscriptionManager,
health_check: mango.HealthCheck,
wallet: mango.Wallet,
group: mango.Group,
account: mango.Account,
market: mango.LoadedMarket,
oracle: mango.Oracle,
) -> ModelStateBuilder:
group_watcher = mango.build_group_watcher(
context, websocket_manager, health_check, group
)
cache = mango.Cache.load(context, group.cache)
cache_watcher = mango.build_cache_watcher(
context, websocket_manager, health_check, cache, group
)
account_subscription, latest_account_observer = mango.build_account_watcher(
context, websocket_manager, health_check, account, group_watcher, cache_watcher
)
initial_price = oracle.fetch_price(context)
price_feed = oracle.to_streaming_observable(context)
latest_price_observer = mango.LatestItemObserverSubscriber(initial_price)
price_disposable = price_feed.subscribe(latest_price_observer)
disposer.add_disposable(price_disposable)
health_check.add("price_subscription", price_feed)
if mango.SerumMarket.isa(market):
serum_market = mango.SerumMarket.ensure(market)
order_owner: PublicKey = (
serum_market.find_openorders_address_for_owner(context, wallet.address)
or SYSTEM_PROGRAM_ADDRESS
)
price_watcher: mango.Watcher[mango.Price] = mango.build_price_watcher(
context, websocket_manager, health_check, disposer, "market", serum_market
)
inventory_watcher: mango.Watcher[
mango.Inventory
] = mango.build_serum_inventory_watcher(
context,
websocket_manager,
health_check,
disposer,
wallet,
serum_market,
price_watcher,
)
latest_open_orders_observer: mango.Watcher[
mango.PlacedOrdersContainer
] = mango.build_serum_open_orders_watcher(
context, websocket_manager, health_check, serum_market, wallet
)
latest_orderbook_watcher: mango.Watcher[
mango.OrderBook
] = mango.build_orderbook_watcher(
context, websocket_manager, health_check, serum_market
)
latest_event_queue_watcher: mango.Watcher[
mango.EventQueue
] = mango.build_serum_event_queue_watcher(
context, websocket_manager, health_check, serum_market
)
elif mango.SpotMarket.isa(market):
spot_market = mango.SpotMarket.ensure(market)
market_index: int = group.slot_by_spot_market_address(spot_market.address).index
order_owner = (
account.spot_open_orders_by_index[market_index] or SYSTEM_PROGRAM_ADDRESS
)
all_open_orders_watchers = __load_all_openorders_watchers(
context, wallet, account, group, websocket_manager, health_check
)
latest_open_orders_observer = list(
[
oo_watcher
for oo_watcher in all_open_orders_watchers
if (
spot_market.base == spot_market.base
and spot_market.quote == spot_market.quote
)
]
)[0]
inventory_watcher = mango.InventoryAccountWatcher(
spot_market,
latest_account_observer,
group_watcher,
all_open_orders_watchers,
cache_watcher,
)
latest_orderbook_watcher = mango.build_orderbook_watcher(
context, websocket_manager, health_check, spot_market
)
latest_event_queue_watcher = mango.build_spot_event_queue_watcher(
context, websocket_manager, health_check, spot_market
)
elif mango.PerpMarket.isa(market):
perp_market = mango.PerpMarket.ensure(market)
order_owner = account.address
all_open_orders_watchers = __load_all_openorders_watchers(
context, wallet, account, group, websocket_manager, health_check
)
inventory_watcher = mango.InventoryAccountWatcher(
perp_market,
latest_account_observer,
group_watcher,
all_open_orders_watchers,
cache_watcher,
)
latest_open_orders_observer = mango.build_perp_open_orders_watcher(
context,
websocket_manager,
health_check,
perp_market,
account,
group,
account_subscription,
)
latest_orderbook_watcher = mango.build_orderbook_watcher(
context, websocket_manager, health_check, perp_market
)
latest_event_queue_watcher = mango.build_perp_event_queue_watcher(
context, websocket_manager, health_check, perp_market
)
else:
raise Exception(
f"Could not determine type of market {market.fully_qualified_symbol} - {market}"
)
model_state = ModelState(
order_owner,
market,
group_watcher,
latest_account_observer,
latest_price_observer,
latest_open_orders_observer,
inventory_watcher,
latest_orderbook_watcher,
latest_event_queue_watcher,
)
return WebsocketModelStateBuilder(model_state)
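# --- Illustrative usage sketch (all arguments are assumed to be built by the caller) ---
#   builder = model_state_builder_factory(
#       ModelUpdateMode.WEBSOCKET, context, disposer, websocket_manager,
#       health_check, wallet, group, account, market, oracle)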
| 10,922
| 190
| 183
|
5d4633ef46069b638eb4cd14d524fd4a344a3b49
| 2,753
|
py
|
Python
|
backend/tomato/lib/cmd/bittorrent.py
|
dswd/ToMaTo
|
355fd3a8c7f95dc72c62383b3edfa8f6c0396bf4
|
[
"BSD-4-Clause-UC"
] | 2
|
2016-11-10T06:12:05.000Z
|
2016-11-10T06:12:10.000Z
|
hostmanager/tomato/lib/cmd/bittorrent.py
|
dswd/ToMaTo
|
355fd3a8c7f95dc72c62383b3edfa8f6c0396bf4
|
[
"BSD-4-Clause-UC"
] | 2
|
2015-01-19T16:00:24.000Z
|
2015-01-20T11:33:56.000Z
|
backend/tomato/lib/cmd/bittorrent.py
|
dswd/ToMaTo
|
355fd3a8c7f95dc72c62383b3edfa8f6c0396bf4
|
[
"BSD-4-Clause-UC"
] | 1
|
2016-11-10T06:12:15.000Z
|
2016-11-10T06:12:15.000Z
|
# -*- coding: utf-8 -*-
# ToMaTo (Topology management software)
# Copyright (C) 2010 Dennis Schwerdel, University of Kaiserslautern
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from . import run, spawn, CommandError, process
from .. import util
from ... import config
import os
_clientPid = None
_clientConfig = {}
_trackerPid = None
| 30.588889
| 155
| 0.726843
|
# -*- coding: utf-8 -*-
# ToMaTo (Topology management software)
# Copyright (C) 2010 Dennis Schwerdel, University of Kaiserslautern
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from . import run, spawn, CommandError, process
from .. import util
from ... import config
import os
_clientPid = None
_clientConfig = {}
_trackerPid = None
def startTracker(port, path):
global _trackerPid
if _trackerPid:
return
assert os.path.exists(path)
usage = run(["bttrack"])
args = ["bttrack", "--port", str(port), "--dfile", os.path.join(path, "tracker.cache"), "--allowed_dir", path]
if "--parse_allowed_interval" in usage: #bittorrent
args += ["--parse_allowed_interval", "1"] #minutes
elif "--parse_dir_interval" in usage: #bittornado
args += ["--parse_dir_interval", "60"] #seconds
_trackerPid = spawn(args)
def stopTracker():
global _trackerPid
process.kill(_trackerPid)
_trackerPid = None
def torrentInfo(torrentData):
from BitTorrent.bencode import bdecode
info = bdecode(torrentData)["info"]
return info
def fileSize(torrentData):
info = torrentInfo(torrentData)
if info.has_key('length'):
return info["length"]
file_length = 0
for file in info['files']:
path = ''
for item in file['path']:
if (path != ''):
path = path + "/"
path = path + item
file_length += file['length']
return file_length
def startClient(path, bwlimit=10000, minport=8010, maxport=8020):
global _clientPid, _clientConfig
if _clientPid:
return
assert os.path.exists(path)
_clientConfig = {"path": path, "bwlimit": bwlimit}
_clientPid = spawn(["btlaunchmany", ".", "--max_upload_rate", str(bwlimit), "--minport", str(minport), "--maxport", str(maxport)], cwd=path, daemon=False)
try:
process.ionice(_clientPid, process.IoPolicy.Idle)
except:
pass #no essential command
def restartClient():
global _clientPid, _clientConfig
if _clientPid:
stopClient()
startClient(**_clientConfig)
def stopClient():
global _clientPid
process.kill(_clientPid)
_clientPid = None
def createTorrent(tracker, dataPath, torrentPath=""):
assert os.path.exists(dataPath)
return run(["btmakemetafile", tracker, dataPath, "--target", torrentPath])
| 1,641
| 0
| 185
|
d12742fbf604ea579708c7757f0f38ebaac260d8
| 667
|
py
|
Python
|
hackathons/migrations/0014_auto_20190703_0453.py
|
Tookmund/hackerforce
|
d757910db1631e26e489a10a99fa67cd74292c4e
|
[
"Apache-2.0"
] | 11
|
2019-11-11T23:27:21.000Z
|
2021-07-19T16:41:44.000Z
|
hackathons/migrations/0014_auto_20190703_0453.py
|
Tookmund/hackerforce
|
d757910db1631e26e489a10a99fa67cd74292c4e
|
[
"Apache-2.0"
] | 11
|
2019-12-24T17:10:05.000Z
|
2021-06-09T18:22:59.000Z
|
hackathons/migrations/0014_auto_20190703_0453.py
|
hackumass/hackerforce
|
dfb6ac1304a7db21853765de9da795e8e9ef20bf
|
[
"Apache-2.0"
] | 7
|
2019-11-21T03:32:06.000Z
|
2021-07-18T15:30:29.000Z
|
# Generated by Django 2.1.9 on 2019-07-03 04:53
from django.db import migrations, models
| 27.791667
| 161
| 0.593703
|
# Generated by Django 2.1.9 on 2019-07-03 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hackathons', '0013_auto_20190702_0649'),
]
operations = [
migrations.AddField(
model_name='lead',
name='times_contacted',
field=models.IntegerField(blank=True, default=1),
),
migrations.AlterField(
model_name='lead',
name='status',
field=models.CharField(choices=[('contacted', 'Contacted'), ('ghosted', 'Ghosted'), ('responded', 'Responded')], default='contacted', max_length=20),
),
]
| 0
| 553
| 23
|
ed9008157534fcd41bdd7451e2a5f8dc32ad3e1a
| 7,450
|
py
|
Python
|
oidv6/samples/run.py
|
chuangzhu/OIDv6
|
e46e66770c520c02e268f0b021fa72451c79ad1e
|
[
"MIT"
] | null | null | null |
oidv6/samples/run.py
|
chuangzhu/OIDv6
|
e46e66770c520c02e268f0b021fa72451c79ad1e
|
[
"MIT"
] | null | null | null |
oidv6/samples/run.py
|
chuangzhu/OIDv6
|
e46e66770c520c02e268f0b021fa72451c79ad1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Массовая загрузка набора данных Open Images Dataset V6
python oidv6/samples/run.py <command> --classes названия_классов_или_текстовый_файл
[--dataset Dataset --type_data train --limit 0 --multi_classes --yes --no_labels --hide_metadata --no_clear_shell]
"""
# ######################################################################################################################
# Импорт необходимых инструментов
# ######################################################################################################################
from datetime import datetime # Работа со временем
from types import ModuleType # Проверка объектов на модуль
# Персональные
import oidv6 # Массовая загрузка набора данных Open Images Dataset V6
from oidv6.OIDv6 import OIDv6 # Массовая загрузка набора данных Open Images Dataset V6
from oidv6.modules.trml.shell import Shell # Работа с Shell
# ######################################################################################################################
# Сообщения
# ######################################################################################################################
class Messages(OIDv6):
"""Класс для сообщений"""
# ------------------------------------------------------------------------------------------------------------------
# Конструктор
# ------------------------------------------------------------------------------------------------------------------
# ######################################################################################################################
# Выполняем только в том случае, если файл запущен сам по себе
# ######################################################################################################################
class Run(Messages):
"""Класс для массовой загрузки набора данных Open Images Dataset V6"""
# ------------------------------------------------------------------------------------------------------------------
# Конструктор
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# Внутренние методы
# ------------------------------------------------------------------------------------------------------------------
# Построение аргументов командной строки
def _build_args(self, conv_to_dict = True):
"""
Построение аргументов командной строки
([bool]) -> None or dict
Аргументы:
conv_to_dict - Преобразование списка аргументов командной строки в словарь
Возвращает: dict если парсер командной строки окончательный, в обратном случае None
"""
super().build_args(False) # Выполнение функции из суперкласса
# Добавление аргументов в парсер командной строки
self._ap.add_argument('command', metavar = '<command> downloader',
choices = self.commands, help = self._('Команда загрузки'))
self._ap.add_argument('--dataset', required = False, metavar = self._('путь_к_директории'),
default = self.dir,
help = self._('Корневая директория для сохранения OIDv6, значение по умолчанию:') +
' %(default)s')
self._ap.add_argument('--type_data', required = False, choices = list(self.type_data.keys()) + ['all'],
default = 'train', metavar = 'train, validation, test ' + self._('или') + ' all',
help = self._('Набор данных, значение по умолчанию:') + ' %(default)s')
self._ap.add_argument('--classes', required = False, nargs = '+', metavar = self._('название_класса'),
help = self._('Последовательность названий классов или текстовый файл'))
self._ap.add_argument('--limit', required = False, default = 0, type = int, metavar = self._('целое_число'),
help = self._('Лимит загрузки изображений, значение по умолчанию:') +
' %(default)s (' + self._('нет лимита') + ')')
self._ap.add_argument('--multi_classes', required = False, action = 'store_true',
help = self._('Загрузка классов в одну директорию'))
self._ap.add_argument('--yes', required = False, action = 'store_true',
help = self._('Автоматическая загрузка служебных файлов'))
self._ap.add_argument('--no_labels', required = False, action = 'store_true',
help = self._('Не формировать метки'))
self._ap.add_argument('--hide_metadata', required = False, action = 'store_true',
help = self._('Вывод метаданных'))
self._ap.add_argument('--no_clear_shell', required = False, action = 'store_false',
help = self._('Не очищать консоль перед выполнением'))
# Преобразование списка аргументов командной строки в словарь
if conv_to_dict is True:
args, _ = self._ap.parse_known_args()
return vars(args) # Преобразование списка аргументов командной строки в словарь
# ------------------------------------------------------------------------------------------------------------------
# Внешние методы
# ------------------------------------------------------------------------------------------------------------------
# Запуск
def run(self, metadata = oidv6, out = True):
"""
Запуск
([module, module, bool, bool]) -> None
Аргументы:
out - Печатать процесс выполнения
"""
# Проверка аргументов
if type(out) is not bool or not isinstance(metadata, ModuleType):
# Вывод сообщения
if out is True:
print(self._invalid_arguments.format(
self.red, datetime.now().strftime(self._format_time),
self.end, __class__.__name__ + '.' + self.run.__name__
))
return False
self._args = self._build_args() # Построение аргументов командной строки
self.clear_shell(self._args['no_clear_shell']) # Очистка консоли перед выполнением
# Приветствие
Shell.add_line() # Добавление линии во весь экран
print(self._oidv6.format(self.bold, self.blue, self.end))
Shell.add_line() # Добавление линии во весь экран
# Запуск
if self._args['hide_metadata'] is False:
print(self._metadata.format(
datetime.now().strftime(self._format_time),
metadata.__author__,
metadata.__email__,
metadata.__maintainer__,
metadata.__version__
))
Shell.add_line() # Добавление линии во весь экран
self.download(self._args, out)
if __name__ == "__main__":
main()
| 45.426829
| 120
| 0.468725
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Массовая загрузка набора данных Open Images Dataset V6
python oidv6/samples/run.py <command> --classes названия_классов_или_текстовый_файл
[--dataset Dataset --type_data train --limit 0 --multi_classes --yes --no_labels --hide_metadata --no_clear_shell]
"""
# ######################################################################################################################
# Импорт необходимых инструментов
# ######################################################################################################################
from datetime import datetime # Работа со временем
from types import ModuleType # Проверка объектов на модуль
# Персональные
import oidv6 # Массовая загрузка набора данных Open Images Dataset V6
from oidv6.OIDv6 import OIDv6 # Массовая загрузка набора данных Open Images Dataset V6
from oidv6.modules.trml.shell import Shell # Работа с Shell
# ######################################################################################################################
# Сообщения
# ######################################################################################################################
class Messages(OIDv6):
"""Класс для сообщений"""
# ------------------------------------------------------------------------------------------------------------------
# Конструктор
# ------------------------------------------------------------------------------------------------------------------
def __init__(self):
super().__init__() # Выполнение конструктора из суперкласса
self._oidv6 = self._('{}{}OIDv6 - Массовая загрузка набора данных Open Images Dataset V6 ...{}')
# ######################################################################################################################
# Выполняем только в том случае, если файл запущен сам по себе
# ######################################################################################################################
class Run(Messages):
"""Класс для массовой загрузки набора данных Open Images Dataset V6"""
# ------------------------------------------------------------------------------------------------------------------
# Конструктор
# ------------------------------------------------------------------------------------------------------------------
def __init__(self):
super().__init__() # Выполнение конструктора из суперкласса
# ------------------------------------------------------------------------------------------------------------------
# Внутренние методы
# ------------------------------------------------------------------------------------------------------------------
# Построение аргументов командной строки
def _build_args(self, conv_to_dict = True):
"""
Построение аргументов командной строки
([bool]) -> None or dict
Аргументы:
conv_to_dict - Преобразование списка аргументов командной строки в словарь
Возвращает: dict если парсер командной строки окончательный, в обратном случае None
"""
super().build_args(False) # Выполнение функции из суперкласса
# Добавление аргументов в парсер командной строки
self._ap.add_argument('command', metavar = '<command> downloader',
choices = self.commands, help = self._('Команда загрузки'))
self._ap.add_argument('--dataset', required = False, metavar = self._('путь_к_директории'),
default = self.dir,
help = self._('Корневая директория для сохранения OIDv6, значение по умолчанию:') +
' %(default)s')
self._ap.add_argument('--type_data', required = False, choices = list(self.type_data.keys()) + ['all'],
default = 'train', metavar = 'train, validation, test ' + self._('или') + ' all',
help = self._('Набор данных, значение по умолчанию:') + ' %(default)s')
self._ap.add_argument('--classes', required = False, nargs = '+', metavar = self._('название_класса'),
help = self._('Последовательность названий классов или текстовый файл'))
self._ap.add_argument('--limit', required = False, default = 0, type = int, metavar = self._('целое_число'),
help = self._('Лимит загрузки изображений, значение по умолчанию:') +
' %(default)s (' + self._('нет лимита') + ')')
self._ap.add_argument('--multi_classes', required = False, action = 'store_true',
help = self._('Загрузка классов в одну директорию'))
self._ap.add_argument('--yes', required = False, action = 'store_true',
help = self._('Автоматическая загрузка служебных файлов'))
self._ap.add_argument('--no_labels', required = False, action = 'store_true',
help = self._('Не формировать метки'))
self._ap.add_argument('--hide_metadata', required = False, action = 'store_true',
help = self._('Вывод метаданных'))
self._ap.add_argument('--no_clear_shell', required = False, action = 'store_false',
help = self._('Не очищать консоль перед выполнением'))
# Преобразование списка аргументов командной строки в словарь
if conv_to_dict is True:
args, _ = self._ap.parse_known_args()
return vars(args) # Преобразование списка аргументов командной строки в словарь
# ------------------------------------------------------------------------------------------------------------------
# Внешние методы
# ------------------------------------------------------------------------------------------------------------------
# Запуск
def run(self, metadata = oidv6, out = True):
"""
Запуск
([module, module, bool, bool]) -> None
Аргументы:
out - Печатать процесс выполнения
"""
# Проверка аргументов
if type(out) is not bool or not isinstance(metadata, ModuleType):
# Вывод сообщения
if out is True:
print(self._invalid_arguments.format(
self.red, datetime.now().strftime(self._format_time),
self.end, __class__.__name__ + '.' + self.run.__name__
))
return False
self._args = self._build_args() # Построение аргументов командной строки
self.clear_shell(self._args['no_clear_shell']) # Очистка консоли перед выполнением
# Приветствие
Shell.add_line() # Добавление линии во весь экран
print(self._oidv6.format(self.bold, self.blue, self.end))
Shell.add_line() # Добавление линии во весь экран
# Запуск
if self._args['hide_metadata'] is False:
print(self._metadata.format(
datetime.now().strftime(self._format_time),
metadata.__author__,
metadata.__email__,
metadata.__maintainer__,
metadata.__version__
))
Shell.add_line() # Добавление линии во весь экран
self.download(self._args, out)
def main():
run = Run()
run.run()
if __name__ == "__main__":
main()
| 359
| 0
| 77
|
fe4004b04e6ea7f0d42e97de6ac9ec98151e1cda
| 4,585
|
py
|
Python
|
tests/_internal/test_auth_handling.py
|
unparalleled-js/py42
|
8c6b054ddd8c2bfea92bf77b0d648af76f1efcf1
|
[
"MIT"
] | 1
|
2020-08-18T22:00:22.000Z
|
2020-08-18T22:00:22.000Z
|
tests/_internal/test_auth_handling.py
|
unparalleled-js/py42
|
8c6b054ddd8c2bfea92bf77b0d648af76f1efcf1
|
[
"MIT"
] | null | null | null |
tests/_internal/test_auth_handling.py
|
unparalleled-js/py42
|
8c6b054ddd8c2bfea92bf77b0d648af76f1efcf1
|
[
"MIT"
] | 1
|
2021-05-10T23:33:34.000Z
|
2021-05-10T23:33:34.000Z
|
import pytest
from requests import Response
from py42._internal.auth_handling import AuthHandler
from py42._internal.auth_handling import HeaderModifier
from py42._internal.auth_handling import TokenProvider
ORIGINAL_VALUE = "test-original-value"
UPDATED_VALUE = "test-updated-value"
CUSTOM_NAME = "Custom-Name"
DEFAULT_HEADER = "Authorization"
TEST_SECRET = "TEST-SECRET"
@pytest.fixture
@pytest.fixture
| 38.208333
| 116
| 0.844711
|
import pytest
from requests import Response
from py42._internal.auth_handling import AuthHandler
from py42._internal.auth_handling import HeaderModifier
from py42._internal.auth_handling import TokenProvider
ORIGINAL_VALUE = "test-original-value"
UPDATED_VALUE = "test-updated-value"
CUSTOM_NAME = "Custom-Name"
DEFAULT_HEADER = "Authorization"
TEST_SECRET = "TEST-SECRET"
@pytest.fixture
def mock_token_provider(mocker):
provider = mocker.MagicMock(spec=TokenProvider)
provider.get_secret_value.return_value = TEST_SECRET
return provider
@pytest.fixture
def mock_header_modifier(mocker):
return mocker.MagicMock(spec=HeaderModifier)
def test_auth_handler_constructs_successfully():
assert AuthHandler(TokenProvider(), HeaderModifier())
def test_auth_handler_renew_authentication_using_cache_calls_get_secret_value_on_token_provider_with_correct_params(
mock_token_provider, mock_header_modifier, mock_session
):
auth_handler = AuthHandler(mock_token_provider, mock_header_modifier)
auth_handler.renew_authentication(mock_session, use_cache=True)
mock_token_provider.get_secret_value.assert_called_once_with(force_refresh=False)
def test_auth_handler_renew_authentication_no_cache_calls_get_secret_value_on_token_provider_with_correct_params(
mock_token_provider, mock_header_modifier, mock_session
):
auth_handler = AuthHandler(mock_token_provider, mock_header_modifier)
auth_handler.renew_authentication(mock_session)
mock_token_provider.get_secret_value.assert_called_once_with(force_refresh=True)
def test_auth_handler_renew_authentication_using_cache_calls_modify_session_on_session_modifier_with_correct_params(
mock_token_provider, mock_header_modifier, mock_session
):
auth_handler = AuthHandler(mock_token_provider, mock_header_modifier)
auth_handler.renew_authentication(mock_session, use_cache=True)
mock_header_modifier.modify_session.assert_called_once_with(
mock_session, TEST_SECRET
)
def test_auth_handler_renew_authentication_no_cache_calls_modify_session_on_session_modifier_with_correct_params(
mock_token_provider, mock_header_modifier, mock_session
):
auth_handler = AuthHandler(mock_token_provider, mock_header_modifier)
auth_handler.renew_authentication(mock_session)
mock_header_modifier.modify_session.assert_called_once_with(
mock_session, TEST_SECRET
)
def test_auth_handler_response_indicates_unauthorized_returns_true_for_401(mocker):
mock_response = mocker.MagicMock(spec=Response)
mock_response.status_code = 401
assert AuthHandler.response_indicates_unauthorized(mock_response)
def test_auth_handler_response_indicates_unauthorized_returns_false_for_non_401(mocker):
mock_response = mocker.MagicMock(spec=Response)
mock_response.status_code = 200
assert not AuthHandler.response_indicates_unauthorized(mock_response)
def test_header_modifier_constructs_successfully():
assert HeaderModifier()
def test_header_modifier_adds_default_header_by_default(mock_session):
header_modifier = HeaderModifier()
header_modifier.modify_session(mock_session, ORIGINAL_VALUE)
assert DEFAULT_HEADER in mock_session.headers
def test_header_modifier_adds_specified_header(mock_session):
header_modifier = HeaderModifier(CUSTOM_NAME)
header_modifier.modify_session(mock_session, ORIGINAL_VALUE)
assert CUSTOM_NAME in mock_session.headers
def test_header_modifier_sets_default_header_to_given_value(mock_session):
header_modifier = HeaderModifier()
header_modifier.modify_session(mock_session, ORIGINAL_VALUE)
assert mock_session.headers.get(DEFAULT_HEADER) == ORIGINAL_VALUE
def test_header_modifier_sets_specified_header_to_given_value(mock_session):
header_modifier = HeaderModifier(CUSTOM_NAME)
header_modifier.modify_session(mock_session, ORIGINAL_VALUE)
assert mock_session.headers.get(CUSTOM_NAME) == ORIGINAL_VALUE
def test_header_modifier_updates_default_header_if_present(mock_session):
header_modifier = HeaderModifier()
header_modifier.modify_session(mock_session, ORIGINAL_VALUE)
header_modifier.modify_session(mock_session, UPDATED_VALUE)
assert mock_session.headers.get(DEFAULT_HEADER) == UPDATED_VALUE
def test_header_modifier_updates_specified_header_if_present(mock_session):
header_modifier = HeaderModifier(CUSTOM_NAME)
header_modifier.modify_session(mock_session, ORIGINAL_VALUE)
header_modifier.modify_session(mock_session, UPDATED_VALUE)
assert mock_session.headers.get(CUSTOM_NAME) == UPDATED_VALUE
| 3,793
| 0
| 366
|
a879ae95f3a28053a492d4080bbb2ca055bd192d
| 5,058
|
py
|
Python
|
python/tensor/tensor_new.py
|
BenOsborn/Cerci
|
5785ae0c9db8a88a5ac8d91aed29cdf0c0c7854a
|
[
"Apache-2.0"
] | null | null | null |
python/tensor/tensor_new.py
|
BenOsborn/Cerci
|
5785ae0c9db8a88a5ac8d91aed29cdf0c0c7854a
|
[
"Apache-2.0"
] | null | null | null |
python/tensor/tensor_new.py
|
BenOsborn/Cerci
|
5785ae0c9db8a88a5ac8d91aed29cdf0c0c7854a
|
[
"Apache-2.0"
] | null | null | null |
# There is probably a more efficient way to do this
# Now I need to do the actual backwards function
| 38.318182
| 124
| 0.619217
|
class AddElementwise:
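    # Autograd node for elementwise addition: forward computes a + b, while
    # ddleft/ddright return all-ones tensors since d(a+b)/da = d(a+b)/db = 1.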
@staticmethod
def forward(matrix_left, matrix_right, backwards=False):
assert(matrix_left.shape == matrix_right.shape)
new_tensor = [a+b for a, b in zip(matrix_left.tensor, matrix_right.tensor)]
if (not backwards):
return Tensor(new_tensor, matrix_left.shape, left=matrix_left, right=matrix_right,
track_grad=(matrix_left.track_grad or matrix_right.track_grad), operator=AddElementwise)
return Tensor(new_tensor, matrix_left.shape, left=None, right=None,
track_grad=False, operator=None)
@staticmethod
def ddleft(matrix_left, matrix_right):
assert(matrix_left.shape == matrix_right.shape)
new_tensor = [1 for _ in range(matrix_left.size)]
return Tensor(new_tensor, matrix_left.shape, left=None, right=None,
track_grad=False, operator=None)
@staticmethod
def ddright(matrix_left, matrix_right):
assert(matrix_left.shape == matrix_right.shape)
new_tensor = [1 for _ in range(matrix_left.size)]
return Tensor(new_tensor, matrix_left.shape, left=None, right=None,
track_grad=False, operator=None)
class MultiplyElementwise:
@staticmethod
def forward(matrix_left, matrix_right, backwards=False):
assert(matrix_left.shape == matrix_right.shape)
new_tensor = [a*b for a, b in zip(matrix_left.tensor, matrix_right.tensor)]
if (not backwards):
return Tensor(new_tensor, matrix_left.shape, left=matrix_left, right=matrix_right,
track_grad=(matrix_left.track_grad or matrix_right.track_grad), operator=MultiplyElementwise)
return Tensor(new_tensor, matrix_left.shape, left=None, right=None,
track_grad=False, operator=None)
@staticmethod
def ddleft(matrix_left, matrix_right):
assert(matrix_left.shape == matrix_right.shape)
return Tensor(matrix_right.tensor.copy(), matrix_right.shape.copy(), left=None, right=None,
track_grad=False, operator=None)
@staticmethod
def ddright(matrix_left, matrix_right):
assert(matrix_left.shape == matrix_right.shape)
return Tensor(matrix_left.tensor.copy(), matrix_left.shape.copy(), left=None, right=None,
track_grad=False, operator=None)
class TensorBase:
def __init__(self, tensor, shape): # The left and the right will contain the links to the other nodes in the tree
self.dims = len(shape)
self.size = len(tensor)
check_length = 1
for i in range(self.dims):
check_length *= shape[i]
assert(check_length == self.size)
self.tensor = tensor
self.shape = shape
def __str__(self):
return self.__string()
# There is probably a more efficient way to do this
def __string(self, index=-1, position=0):
if (abs(index) == self.dims):
mat = "[ "
for i in range(self.shape[0]):
mat += f"{self.tensor[position + i]} "
mat += "]"
return mat
mat_final = "[ "
product = 1
for i in range(self.dims + index):
product *= self.shape[i]
for i in range(self.shape[index]):
mat_final += f"\n{abs(index) * ' '}{ self.__string(index-1, position+product*i)} "
return f"{mat_final}\n{(abs(index) - 1) * ' '}]" if (index != -1) else f"{mat_final}\n]"
def __add__(self, other):
return AddElementwise.forward(self, other)
def __mul__(self, other):
return MultiplyElementwise.forward(self, other)
class Tensor(TensorBase):
def __init__(self, tensor, shape, left=None, right=None, track_grad=False, operator=None):
super().__init__(tensor, shape)
self.track_grad = track_grad
if (track_grad):
self.operator = operator
self.left = left
self.right = right
self.grad = Tensor([0 for _ in range(self.size)], self.shape)
def zeroGrad(self):
if (self.track_grad):
self.grad = Tensor([0 for _ in range(self.size)], self.shape)
if (self.left != None):
self.left.zeroGrad()
if (self.right != None):
self.right.zeroGrad()
# Now I need to do the actual backwards function
def backwards(self, factors=None):
if (factors == None):
factors = Tensor([1 for _ in range(self.size)], self.shape)
self.grad = AddElementwise.forward(self.grad, factors, backwards=True)
# I cant just use none when I port this to C++
if (self.left != None):
self.left.backwards(factors=MultiplyElementwise.forward(factors, self.operator.ddleft(self.left, self.right)))
if (self.right != None):
self.right.backwards(factors=MultiplyElementwise.forward(factors, self.operator.ddright(self.left, self.right)))
| 4,366
| 275
| 307
|
467c004ada7353bc4bb87367e3774a1bba52e193
| 3,027
|
py
|
Python
|
dfu/host/hex2dfu.py
|
LeHonk/usb-stack
|
3869706a951eb00bf9ab630f0adb27c5676c3426
|
[
"MIT"
] | null | null | null |
dfu/host/hex2dfu.py
|
LeHonk/usb-stack
|
3869706a951eb00bf9ab630f0adb27c5676c3426
|
[
"MIT"
] | null | null | null |
dfu/host/hex2dfu.py
|
LeHonk/usb-stack
|
3869706a951eb00bf9ab630f0adb27c5676c3426
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
# TODO: Actual values
devid = { 'p24fj256gb106': 0xFFFF
, 'p18f2550': 0x1234
}
targetmem = { 'int_flash': 0
, 'int_eeprom': 1
# , 'ext_flash': 2
# , 'ext_eeprom': 3
}
# TODO: Actual values
maxmem = { 'p24fj256gb106': {'int_flash':255*1024, 'int_eeprom':2048}
, 'p18f2550': {'int_flash':255*1024, 'int_eeprom':2048}
}
blocksize = { 'p24fj256gb106': {'int_flash':64, 'int_eeprom':16}
, 'p18f2550': {'int_flash':32, 'int_eeprom':16}
}
if __name__ == '__main__':
	import sys
	import struct
import argparse
import os.path
from intelhex import IntelHex
from cStringIO import StringIO
from dfu_suffix import *
parser = argparse.ArgumentParser( description='Convert an Intel HEX file into a dfu file suitable for OpenPICUSB bootloader.',
epilog='''Default output filename is the input filename with
".dfu" in stead of ".hex".''')
action = parser.add_mutually_exclusive_group( required=True )
# parser.add_argument( '-f', '--force', help='Forcefully try to execute given command. May result in unusable files.', action='store_true', default=False )
parser.add_argument( '-p', '--processor', help='Target processor (currently only p18f2550 and p24fj256bg106)', dest='proc', nargs=1, choices=devid, required=True )
parser.add_argument( '-t', '--targetmem', help='Target memory', nargs=1, choices=targetmem, default='int_flash' )
parser.add_argument( '-o', '--output', help='Output file.', type=argparse.FileType('wb'), dest='outfile', nargs=1, metavar='file.dfu' )
parser.add_argument( 'hexfile', help='Firmware file with DFU suffix.', type=argparse.FileType('r'), nargs=1 )
parser.add_argument( 'vid', help='The Vendor ID to use.', action='store', type=int, nargs='?', default=0xFFFF );
parser.add_argument( 'pid', help='The Product ID to use.', action='store', type=int, nargs='?', default=0xFFFF );
parser.add_argument( 'did', help='The Device version to use.', action='store', type=int, nargs='?', default=0xFFFF );
args = parser.parse_args()
	(rootname, ext) = os.path.splitext( args.hexfile[0].name )	# nargs=1 gives a one-element list
	try:
		ih = IntelHex( args.hexfile[0] )			# IntelHex() reads a hex file object directly
	except IOError:
		print 'File "%s" not found.' % args.hexfile[0].name
		sys.exit(1)
	args.hexfile[0].close()
blob = StringIO()
PROC = args.proc[0]
TGTMEM = args.targetmem[0]
DEVID = devid[PROC]
MAXMEM = maxmem[PROC][TGTMEM]
BLOCKSIZE = blocksize[PROC][TGTMEM]
	# Construct bootloader header
	blob.write( 'HBL\x01' )					# Magic identifier
	blob.write( struct.pack('>h', DEVID) )			# Device ID in big endian 16bits
	blob.write( struct.pack('>h', targetmem[TGTMEM]) )	# Target memory
	for addr in range(0, MAXMEM, BLOCKSIZE):
		blob.write( struct.pack('>l', addr) )
		ih.tobinfile(blob, start=addr, size=BLOCKSIZE)
	# Suffix is assumed to be a namedtuple, so _make() takes a single iterable
	blob_suffix = Suffix._make( (args.did, args.pid, args.vid, 0x0100, 'DFU', 16, 0) )
	firmware = append_suffix(blob, blob_suffix)
	if args.outfile is None:
		args.outfile = open( rootname + '.dfu', 'wb' )
	else:
		args.outfile = args.outfile[0]			# unwrap the nargs=1 list
	args.outfile.write(firmware)
	args.outfile.close()
| 36.914634
| 164
| 0.682524
|
#!/usr/bin/env python2.7
# TODO: Actual values
devid = { 'p24fj256gb106': 0xFFFF
, 'p18f2550': 0x1234
}
targetmem = { 'int_flash': 0
, 'int_eeprom': 1
# , 'ext_flash': 2
# , 'ext_eeprom': 3
}
# TODO: Actual values
maxmem = { 'p24fj256gb106': {'int_flash':255*1024, 'int_eeprom':2048}
, 'p18f2550': {'int_flash':255*1024, 'int_eeprom':2048}
}
blocksize = { 'p24fj256gb106': {'int_flash':64, 'int_eeprom':16}
, 'p18f2550': {'int_flash':32, 'int_eeprom':16}
}
if __name__ == '__main__':
	import sys
	import struct
import argparse
import os.path
from intelhex import IntelHex
from cStringIO import StringIO
from dfu_suffix import *
parser = argparse.ArgumentParser( description='Convert an Intel HEX file into a dfu file suitable for OpenPICUSB bootloader.',
epilog='''Default output filename is the input filename with
".dfu" in stead of ".hex".''')
action = parser.add_mutually_exclusive_group( required=True )
# parser.add_argument( '-f', '--force', help='Forcefully try to execute given command. May result in unusable files.', action='store_true', default=False )
parser.add_argument( '-p', '--processor', help='Target processor (currently only p18f2550 and p24fj256bg106)', dest='proc', nargs=1, choices=devid, required=True )
parser.add_argument( '-t', '--targetmem', help='Target memory', nargs=1, choices=targetmem, default='int_flash' )
parser.add_argument( '-o', '--output', help='Output file.', type=argparse.FileType('wb'), dest='outfile', nargs=1, metavar='file.dfu' )
parser.add_argument( 'hexfile', help='Firmware file with DFU suffix.', type=argparse.FileType('r'), nargs=1 )
parser.add_argument( 'vid', help='The Vendor ID to use.', action='store', type=int, nargs='?', default=0xFFFF );
parser.add_argument( 'pid', help='The Product ID to use.', action='store', type=int, nargs='?', default=0xFFFF );
parser.add_argument( 'did', help='The Device version to use.', action='store', type=int, nargs='?', default=0xFFFF );
args = parser.parse_args()
	(rootname, ext) = os.path.splitext( args.hexfile[0].name )	# nargs=1 gives a one-element list
	try:
		ih = IntelHex( args.hexfile[0] )			# IntelHex() reads a hex file object directly
	except IOError:
		print 'File "%s" not found.' % args.hexfile[0].name
		sys.exit(1)
	args.hexfile[0].close()
blob = StringIO()
PROC = args.proc[0]
TGTMEM = args.targetmem[0]
DEVID = devid[PROC]
MAXMEM = maxmem[PROC][TGTMEM]
BLOCKSIZE = blocksize[PROC][TGTMEM]
	# Construct bootloader header
	blob.write( 'HBL\x01' )					# Magic identifier
	blob.write( struct.pack('>h', DEVID) )			# Device ID in big endian 16bits
	blob.write( struct.pack('>h', targetmem[TGTMEM]) )	# Target memory
	for addr in range(0, MAXMEM, BLOCKSIZE):
		blob.write( struct.pack('>l', addr) )
		ih.tobinfile(blob, start=addr, size=BLOCKSIZE)
	# Suffix is assumed to be a namedtuple, so _make() takes a single iterable
	blob_suffix = Suffix._make( (args.did, args.pid, args.vid, 0x0100, 'DFU', 16, 0) )
	firmware = append_suffix(blob, blob_suffix)
	if args.outfile is None:
		args.outfile = open( rootname + '.dfu', 'wb' )
	else:
		args.outfile = args.outfile[0]			# unwrap the nargs=1 list
	args.outfile.write(firmware)
	args.outfile.close()
| 0
| 0
| 0
|
43d92d931d95fafab64fab33655d9809b86351cf
| 2,098
|
py
|
Python
|
plots/midterm/FR_illustration.py
|
jokteur/ASMA
|
25ac8a0455c680232d56c18d31de62c3188b7153
|
[
"MIT"
] | 2
|
2021-11-01T09:13:17.000Z
|
2022-03-08T14:34:16.000Z
|
plots/midterm/FR_illustration.py
|
jokteur/ASMA
|
25ac8a0455c680232d56c18d31de62c3188b7153
|
[
"MIT"
] | null | null | null |
plots/midterm/FR_illustration.py
|
jokteur/ASMA
|
25ac8a0455c680232d56c18d31de62c3188b7153
|
[
"MIT"
] | null | null | null |
import time
import copy
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import flowrect
from flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM
from flowrect.simulations import particle_population, flow_rectification, quasi_renewal
# Plot saving parameters
save = False
save_path = ""
save_name = "m_t2.pdf"
# Simulation parameters
Lambda = np.array([33.0, 8.0])
Gamma = np.array([-8, 1.0])
N = 10
dt = 1e-4
np.random.seed(123)
ts, M, spikes, A, X = particle_population(
0.18, dt, Gamma, Lambda, 0, 3, 0, 2, c=10, Gamma_ext=True, N=N
)
mask = spikes.T == 1
ticks = ts[spikes.T[0] == 1]
ticks_text = [r"$t^{(1)}$", r"$t^{(2)}$"]
fig = plt.figure(figsize=(6, 4))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
# Calculate m_t
spike_mask = spikes.T[0] == 1
m_t = np.zeros(len(ts))
for s in range(1, len(ts)):
if spike_mask[s]:
m_t[s] = M[s, 0, 1]
else:
m_t[s] = m_t[s - 1]
# Leaky memory plot
ax1 = plt.subplot(gs[0])
ax1.set_yticks([])
ax1.plot(ts, M[:, 0, 1], "-k", linewidth=0.9, label=r"$M$")
ax1.plot(ts, m_t, "-r", linewidth=0.9, label=r"$m_t$")
ax1.set_ylim(0, 2)
ax1.legend()
text = (
r"$m_t(t^{(2)}) = m_t(t^{(1)})"
"\cdot e^{-\lambda (t^{(2)} - t^{(1)})} + \Gamma$"
"\n"
r" $= m_t(t^{(1)}) \cdot e^{-\lambda a} + \Gamma $"
)
ax1.annotate(
text,
color="grey",
xy=(0.11, 1.08),
xycoords="data",
xytext=(0.2, 0.9),
textcoords="axes fraction",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.3"),
horizontalalignment="left",
verticalalignment="top",
)
# Spike plot
ax2 = plt.subplot(gs[1], sharex=ax1)
ax2.eventplot(
ts[mask[0]],
lineoffsets=0.5,
colors="black",
linewidths=0.5,
)
ax2.set_xticks(ticks)
ax2.set_xticklabels(ticks_text)
ax2.set_yticks([])
ax2.set_ylabel("Spikes")
ax2.set_ylim(0, 1)
if save:
fig.savefig(os.path.join(save_path, save_name), transparent=True)
plt.show()
| 22.804348
| 87
| 0.64204
|
import time
import copy
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import flowrect
from flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM
from flowrect.simulations import particle_population, flow_rectification, quasi_renewal
# Plot saving parameters
save = False
save_path = ""
save_name = "m_t2.pdf"
# Simulation parameters
Lambda = np.array([33.0, 8.0])
Gamma = np.array([-8, 1.0])
N = 10
dt = 1e-4
np.random.seed(123)
ts, M, spikes, A, X = particle_population(
0.18, dt, Gamma, Lambda, 0, 3, 0, 2, c=10, Gamma_ext=True, N=N
)
mask = spikes.T == 1
ticks = ts[spikes.T[0] == 1]
ticks_text = [r"$t^{(1)}$", r"$t^{(2)}$"]
fig = plt.figure(figsize=(6, 4))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
# Calculate m_t
spike_mask = spikes.T[0] == 1
m_t = np.zeros(len(ts))
for s in range(1, len(ts)):
if spike_mask[s]:
m_t[s] = M[s, 0, 1]
else:
m_t[s] = m_t[s - 1]
# Leaky memory plot
ax1 = plt.subplot(gs[0])
ax1.set_yticks([])
ax1.plot(ts, M[:, 0, 1], "-k", linewidth=0.9, label=r"$M$")
ax1.plot(ts, m_t, "-r", linewidth=0.9, label=r"$m_t$")
ax1.set_ylim(0, 2)
ax1.legend()
text = (
r"$m_t(t^{(2)}) = m_t(t^{(1)})"
"\cdot e^{-\lambda (t^{(2)} - t^{(1)})} + \Gamma$"
"\n"
r" $= m_t(t^{(1)}) \cdot e^{-\lambda a} + \Gamma $"
)
ax1.annotate(
text,
color="grey",
xy=(0.11, 1.08),
xycoords="data",
xytext=(0.2, 0.9),
textcoords="axes fraction",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.3"),
horizontalalignment="left",
verticalalignment="top",
)
# Spike plot
ax2 = plt.subplot(gs[1], sharex=ax1)
ax2.eventplot(
ts[mask[0]],
lineoffsets=0.5,
colors="black",
linewidths=0.5,
)
ax2.set_xticks(ticks)
ax2.set_xticklabels(ticks_text)
ax2.set_yticks([])
ax2.set_ylabel("Spikes")
ax2.set_ylim(0, 1)
if save:
fig.savefig(os.path.join(save_path, save_name), transparent=True)
plt.show()
| 0
| 0
| 0
|
29d451b8dd8c0ba5e18591eb336cbb335fe1a3fb
| 3,311
|
py
|
Python
|
tk_recoder/tab_text_converter.py
|
anton-pribora/py-recoder
|
ee3cd3a6dc9ff78081ed963a16d765d0a004f4d6
|
[
"MIT"
] | null | null | null |
tk_recoder/tab_text_converter.py
|
anton-pribora/py-recoder
|
ee3cd3a6dc9ff78081ed963a16d765d0a004f4d6
|
[
"MIT"
] | null | null | null |
tk_recoder/tab_text_converter.py
|
anton-pribora/py-recoder
|
ee3cd3a6dc9ff78081ed963a16d765d0a004f4d6
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import ttk
import tkinter.scrolledtext as st
from tkinter import filedialog
from functools import partial
"""
Вкладка "перекодировать текст"
"""
def init_frame(self, frame: tk.Frame):
"""
Инициализация вкладки "Перекодировать текст"
:param tk_recoder.gui.Gui self: Основное окно программы
:param frame: Контейнер вкладки
:return: None
"""
buttons = tk.Frame(frame)
buttons.pack(fill='x', padx=10, pady=(10, 0))
buttons.columnconfigure(5, weight=1)
texts = tk.Frame(frame)
texts.pack(fill='both', expand=1, pady=(10, 0), padx=10)
self.tc_text_from = st.ScrolledText(texts, width=30, height=3)
self.tc_text_from.pack(side='left', expand=1, fill='both', padx=(0, 2))
self.tc_text_from.insert(tk.INSERT, "Привет мир!")
self.tc_text_to = st.ScrolledText(texts, width=30, height=3)
self.tc_text_to.pack(side='right', expand=1, fill='both', padx=(2, 0))
ttk.Label(buttons, text='Исходная').grid(column=0, row=0)
choices = self.recoder.text_encodings
self.tc_enc_from = tk.StringVar(self)
self.tc_enc_from.set(choices[0])
ttk.OptionMenu(buttons, self.tc_enc_from, self.tc_enc_from.get(), *choices).grid(column=1, row=0, padx=(10, 20))
ttk.Label(buttons, text='Конечная').grid(column=2, row=0)
self.tc_enc_to = tk.StringVar(self)
self.tc_enc_to.set(choices[0])
ttk.OptionMenu(buttons, self.tc_enc_to, self.tc_enc_to.get(), *choices).grid(column=3, row=0, padx=(10, 20))
enc_button = ttk.Button(buttons, text='Перекодировать', padding=(10, 3, 10, 3), command=convert)
enc_button.grid(column=4, row=0)
bt = tk.Menubutton(buttons, text='Сохранить как...', relief='raised', compound='right', padx=10)
popup = tk.Menu(bt, tearoff=0)
bt.configure(menu=popup)
for enc in self.recoder.file_encodings:
popup.add_command(label=enc, command=partial(save_as, enc))
frame.columnconfigure(5, weight=1)
bt.grid(column=5, row=0, padx=10, sticky=tk.E)
| 38.952941
| 116
| 0.598309
|
import tkinter as tk
from tkinter import ttk
import tkinter.scrolledtext as st
from tkinter import filedialog
from functools import partial
"""
Вкладка "перекодировать текст"
"""
def init_frame(self, frame: tk.Frame):
"""
Инициализация вкладки "Перекодировать текст"
:param tk_recoder.gui.Gui self: Основное окно программы
:param frame: Контейнер вкладки
:return: None
"""
buttons = tk.Frame(frame)
buttons.pack(fill='x', padx=10, pady=(10, 0))
buttons.columnconfigure(5, weight=1)
texts = tk.Frame(frame)
texts.pack(fill='both', expand=1, pady=(10, 0), padx=10)
self.tc_text_from = st.ScrolledText(texts, width=30, height=3)
self.tc_text_from.pack(side='left', expand=1, fill='both', padx=(0, 2))
self.tc_text_from.insert(tk.INSERT, "Привет мир!")
self.tc_text_to = st.ScrolledText(texts, width=30, height=3)
self.tc_text_to.pack(side='right', expand=1, fill='both', padx=(2, 0))
ttk.Label(buttons, text='Исходная').grid(column=0, row=0)
choices = self.recoder.text_encodings
self.tc_enc_from = tk.StringVar(self)
self.tc_enc_from.set(choices[0])
ttk.OptionMenu(buttons, self.tc_enc_from, self.tc_enc_from.get(), *choices).grid(column=1, row=0, padx=(10, 20))
ttk.Label(buttons, text='Конечная').grid(column=2, row=0)
self.tc_enc_to = tk.StringVar(self)
self.tc_enc_to.set(choices[0])
ttk.OptionMenu(buttons, self.tc_enc_to, self.tc_enc_to.get(), *choices).grid(column=3, row=0, padx=(10, 20))
def convert():
self.tc_text_to.replace(1.0, tk.END, self.recoder.convert_text(self.tc_enc_from.get(),
self.tc_enc_to.get(),
self.tc_text_from.get(1.0, tk.END)))
self.update_status(
'Текст перекодирован из {enc_from} в {enc_to}'.format(enc_from=self.tc_enc_from.get(),
enc_to=self.tc_enc_to.get()))
enc_button = ttk.Button(buttons, text='Перекодировать', padding=(10, 3, 10, 3), command=convert)
enc_button.grid(column=4, row=0)
bt = tk.Menubutton(buttons, text='Сохранить как...', relief='raised', compound='right', padx=10)
popup = tk.Menu(bt, tearoff=0)
bt.configure(menu=popup)
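    # Writes the converted text to a user-chosen file in the selected encoding;
    # 'utf-8-sig' is used when the UTF-8 with BOM variant is requested.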
def save_as(encoding):
try:
file = filedialog.asksaveasfilename()
if file:
if encoding == self.recoder.utf8_bom_encoding:
file_encoding = 'utf-8-sig'
else:
file_encoding = encoding
with open(file, 'w', encoding=file_encoding, errors='replace') as fp:
fp.write(self.tc_text_to.get(1.0, tk.END))
self.update_status('Данные в кодировке {enc} сохранены в файл {file}'.format(enc=encoding,
file=file))
except Exception as err:
self.update_status('Ошибка: {err}'.format(err=str(err)))
for enc in self.recoder.file_encodings:
popup.add_command(label=enc, command=partial(save_as, enc))
frame.columnconfigure(5, weight=1)
bt.grid(column=5, row=0, padx=10, sticky=tk.E)
| 1,297
| 0
| 54
|
e35513963af5102e6dfdfe622a13488cd9004a55
| 595
|
py
|
Python
|
biggee.py
|
simretmengesha/PublicNLPA
|
eb62b32a3eb3f7db6fc03579c36f252b72266b65
|
[
"MIT"
] | null | null | null |
biggee.py
|
simretmengesha/PublicNLPA
|
eb62b32a3eb3f7db6fc03579c36f252b72266b65
|
[
"MIT"
] | null | null | null |
biggee.py
|
simretmengesha/PublicNLPA
|
eb62b32a3eb3f7db6fc03579c36f252b72266b65
|
[
"MIT"
] | null | null | null |
# Bigram formation
# using list comprehension + enumerate() + split()
# initializing list
test_list = ['በሙቀት ጀምሮ በቅዝቃዜ መጨረስ የዚህ ዓለም መገለጫ ሆኗል እልልታ በኡኡታ፣ ሠርግ በግልግል፣ ማሬ የሚለው ቃል እሬቴ በሚልይለወጣል', 'ጨርሰው የማይሰሩ አሳሳቢ አይደሉም፣ አይሠሩምና። ብልሽት ያለባቸው ማሞቂያዎች ግን ሰውዬው ሲሞክራቸው ይሠራሉ ']
# printing the original list
print ("The original list is : " + str(test_list))
# using list comprehension + enumerate() + split()
# for Bigram formation
res = [(x, i.split()[j + 1]) for i in test_list
for j, x in enumerate(i.split()) if j < len(i.split()) - 1]
# printing result
print ("The formed bigrams are : " + str(res))
| 33.055556
| 171
| 0.692437
|
# Bigram formation
# using list comprehension + enumerate() + split()
# initializing list
test_list = ['በሙቀት ጀምሮ በቅዝቃዜ መጨረስ የዚህ ዓለም መገለጫ ሆኗል እልልታ በኡኡታ፣ ሠርግ በግልግል፣ ማሬ የሚለው ቃል እሬቴ በሚልይለወጣል', 'ጨርሰው የማይሰሩ አሳሳቢ አይደሉም፣ አይሠሩምና። ብልሽት ያለባቸው ማሞቂያዎች ግን ሰውዬው ሲሞክራቸው ይሠራሉ ']
# printing the original list
print ("The original list is : " + str(test_list))
# using list comprehension + enumerate() + split()
# for Bigram formation
res = [(x, i.split()[j + 1]) for i in test_list
for j, x in enumerate(i.split()) if j < len(i.split()) - 1]
# printing result
print ("The formed bigrams are : " + str(res))
| 0
| 0
| 0
|
be1ca7378afd4819c2e9608b28938a17a267e2d1
| 2,342
|
py
|
Python
|
chia/consensus/coinbase.py
|
Hydrangea-Network/hydrangea-blockchain
|
d15662329958dbdaa9cbd99733ba729f0e74ce54
|
[
"Apache-2.0"
] | 1
|
2022-03-15T06:41:49.000Z
|
2022-03-15T06:41:49.000Z
|
chia/consensus/coinbase.py
|
Hydrangea-Network/hydrangea-blockchain
|
d15662329958dbdaa9cbd99733ba729f0e74ce54
|
[
"Apache-2.0"
] | null | null | null |
chia/consensus/coinbase.py
|
Hydrangea-Network/hydrangea-blockchain
|
d15662329958dbdaa9cbd99733ba729f0e74ce54
|
[
"Apache-2.0"
] | null | null | null |
from blspy import G1Element
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint32, uint64
from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk
| 45.038462
| 114
| 0.799744
|
from blspy import G1Element
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint32, uint64
from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk
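# Reward coin parent ids are deterministic: each one packs half of the genesis
# challenge (first 16 bytes for pool/community/staking/timelord coins, last 16
# for farmer coins) together with the 16-byte big-endian block height.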
def create_puzzlehash_for_pk(pub_key: G1Element) -> bytes32:
return puzzle_for_pk(pub_key).get_tree_hash()
def pool_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[:16] + block_height.to_bytes(16, "big"))
def community_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[:16] + block_height.to_bytes(16, "big"))
def staking_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[:16] + block_height.to_bytes(16, "big"))
def farmer_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[16:] + block_height.to_bytes(16, "big"))
def timelord_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[:16] + block_height.to_bytes(16, "big"))
def create_pool_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = pool_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
def create_community_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = community_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
def create_staking_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = staking_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
def create_farmer_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = farmer_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
def create_timelord_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = timelord_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
| 1,816
| 0
| 253
|
7190d4a76e2659164c8852533d26134a504bdea3
| 1,524
|
py
|
Python
|
apps/dcl/mdl/m_mdl.py
|
yt7589/cvep
|
1f77169bdbb614ea32a30d98eba87b028b19890b
|
[
"Apache-2.0"
] | null | null | null |
apps/dcl/mdl/m_mdl.py
|
yt7589/cvep
|
1f77169bdbb614ea32a30d98eba87b028b19890b
|
[
"Apache-2.0"
] | null | null | null |
apps/dcl/mdl/m_mdl.py
|
yt7589/cvep
|
1f77169bdbb614ea32a30d98eba87b028b19890b
|
[
"Apache-2.0"
] | 1
|
2020-09-24T04:28:20.000Z
|
2020-09-24T04:28:20.000Z
|
#
#
from os import stat
import pymongo
from apps.wxs.model.m_mongodb import MMongoDb
| 33.130435
| 83
| 0.599738
|
#
#
from os import stat
import pymongo
from apps.wxs.model.m_mongodb import MMongoDb
class MMdl(object):
def __init__(self):
self.name = 'apps.wxs.model.MModel'
@staticmethod
def is_model_exists(model_name):
query_cond = {'model_name': model_name}
fields = {'model_id': 1, 'model_name': 1, 'model_num': 1}
if MMongoDb.db['t_model'].find_one(query_cond, fields) is None:
return False
else:
return True
@staticmethod
def insert(model_vo):
'''
向t_model表中添加记录,model_vo中包括:
model_id, model_name, model_code, brand_id,
brand_code, model_num=1
'''
return MMongoDb.db['t_model'].insert_one(model_vo)
@staticmethod
def get_model_by_name(model_name):
query_cond = {'model_name': model_name}
fields = {'model_id': 1, 'model_name': 1, 'model_code': 1, 'model_num': 1}
return MMongoDb.db['t_model'].find_one(query_cond, fields)
@staticmethod
def get_model_vo_by_id(model_id):
query_cond = {'model_id': model_id}
fields = {'model_name': 1, 'model_code': 1, 'source_type': 1}
return MMongoDb.db['t_model'].find_one(query_cond, fields)
@staticmethod
def get_wxs_bms():
query_cond = {'source_type': 1}
fields = {'model_code':1, 'model_name': 1}
return MMongoDb.convert_recs(MMongoDb.db['t_model']\
.find(query_cond, fields))
| 919
| 515
| 24
|
04031ae8151added17a1d39405820110dd82354f
| 661
|
py
|
Python
|
dags/book_data.py
|
blue-yonder/airflow-plugin-demo
|
c7044f97532c2f2a3d674762498cb6e58c3e1a1c
|
[
"CC0-1.0"
] | 55
|
2016-07-23T21:09:43.000Z
|
2021-05-26T23:48:55.000Z
|
dags/book_data.py
|
blue-yonder/airflow-plugin-demo
|
c7044f97532c2f2a3d674762498cb6e58c3e1a1c
|
[
"CC0-1.0"
] | null | null | null |
dags/book_data.py
|
blue-yonder/airflow-plugin-demo
|
c7044f97532c2f2a3d674762498cb6e58c3e1a1c
|
[
"CC0-1.0"
] | 21
|
2016-10-24T17:15:32.000Z
|
2021-07-02T10:38:25.000Z
|
"""
Workflow definition to book data
"""
from __future__ import division, absolute_import, print_function
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators import (
BookData
)
dag_id = "book_data"
schedule_interval = None
default_args = {
'owner': 'europython',
'depends_on_past': False,
'email': ['airflow@europython'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(seconds=30)
}
dag = DAG(
dag_id,
start_date=datetime(2016, 12, 7),
schedule_interval=schedule_interval,
default_args=default_args)
book = BookData(dag=dag)
| 19.441176
| 64
| 0.709531
|
"""
Workflow definition to book data
"""
from __future__ import division, absolute_import, print_function
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators import (
BookData
)
dag_id = "book_data"
schedule_interval = None
default_args = {
'owner': 'europython',
'depends_on_past': False,
'email': ['airflow@europython'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(seconds=30)
}
dag = DAG(
dag_id,
start_date=datetime(2016, 12, 7),
schedule_interval=schedule_interval,
default_args=default_args)
book = BookData(dag=dag)
| 0
| 0
| 0
|
8e453bcd4f0e7baa3c63a6cdd373f14f6c2a93bc
| 1,103
|
py
|
Python
|
alex/components/tts/test_voicerss.py
|
oplatek/alex
|
73af644ec35c8a1cd0c37cd478c2afc1db717e0b
|
[
"Apache-2.0"
] | 184
|
2015-02-11T04:14:41.000Z
|
2022-03-24T21:43:58.000Z
|
alex/components/tts/test_voicerss.py
|
oplatek/alex
|
73af644ec35c8a1cd0c37cd478c2afc1db717e0b
|
[
"Apache-2.0"
] | 69
|
2015-01-11T04:57:22.000Z
|
2019-04-24T10:25:56.000Z
|
alex/components/tts/test_voicerss.py
|
oplatek/alex
|
73af644ec35c8a1cd0c37cd478c2afc1db717e0b
|
[
"Apache-2.0"
] | 61
|
2015-03-04T10:52:13.000Z
|
2022-03-04T12:14:06.000Z
|
from unittest import TestCase
from alex.components.tts.voicerss import VoiceRssTTS
import alex.utils.audio as audio
import wave
from alex.utils.config import as_project_path
__author__ = 'm2rtin'
| 30.638889
| 91
| 0.56029
|
from unittest import TestCase
from alex.components.tts.voicerss import VoiceRssTTS
import alex.utils.audio as audio
import wave
from alex.utils.config import as_project_path
__author__ = 'm2rtin'
class TestVoiceRssTTS(TestCase):
def test_synthesise_en(self):
text = 'Hello, this is alex, the call is recorded, how may I help You?'
cfg = {
'Audio': {
'sample_rate': 16000,
},
'TTS': {
'type': 'VoiceRss',
'VoiceRss': {
'language': 'en-us',
'preprocessing': as_project_path("resources/tts/prep_voicerss_en.cfg"),
'tempo': 1.0,
'api_key': 'ea29b823c83a426bbfe99f4cbce109f6'
}
}
}
wav_path = '/tmp/voice_rss_tts.wav'
tts = VoiceRssTTS(cfg)
wav = tts.synthesize(text)
audio.save_wav(cfg, wav_path, wav)
file = wave.open(wav_path)
wav_length = float(file.getnframes()) / file.getframerate()
self.assertEquals(5.292, wav_length)
| 845
| 11
| 49
|
1639b5d6c5f1c18d8197d2c4d3ba9d689afeb72f
| 2,096
|
py
|
Python
|
tests/test_cpu_ins_txa.py
|
hspaans/6502-emulator
|
02b802c43caf8a04833dd1f3d48077f9e2175e7e
|
[
"MIT"
] | null | null | null |
tests/test_cpu_ins_txa.py
|
hspaans/6502-emulator
|
02b802c43caf8a04833dd1f3d48077f9e2175e7e
|
[
"MIT"
] | null | null | null |
tests/test_cpu_ins_txa.py
|
hspaans/6502-emulator
|
02b802c43caf8a04833dd1f3d48077f9e2175e7e
|
[
"MIT"
] | null | null | null |
"""
TXA - Transfer Register X to Accumulator.
A = X
Copies the current contents of the X register into the accumulator and sets
the zero and negative flags as appropriate.
Processor Status after use:
+------+-------------------+--------------------------+
| Flag | Description | State |
+======+===================+==========================+
| C | Carry Flag | Not affected |
+------+-------------------+--------------------------+
| Z    | Zero Flag         | Set if A = 0             |
+------+-------------------+--------------------------+
| I | Interrupt Disable | Not affected |
+------+-------------------+--------------------------+
| D | Decimal Mode Flag | Not affected |
+------+-------------------+--------------------------+
| B | Break Command | Not affected |
+------+-------------------+--------------------------+
| V | Overflow Flag | Not affected |
+------+-------------------+--------------------------+
| N | Negative Flag | Set if bit 7 of A is set |
+------+-------------------+--------------------------+
+-----------------+--------+-------+--------+
| Addressing Mode | Opcode | Bytes | Cycles |
+=================+========+=======+========+
| Implied | 0x8A | 1 | 2 |
+-----------------+--------+-------+--------+
See also: TAX
"""
import pytest
import m6502
@pytest.mark.parametrize(
"value, flag_n, flag_z", [
(0x0F, False, False),
(0x00, False, True),
(0xF0, True, False),
])
def test_cpu_ins_txa_imm(value: int, flag_n: bool, flag_z: bool) -> None:
"""
Transfer Accumulator, Implied.
return: None
"""
memory = m6502.Memory()
cpu = m6502.Processor(memory)
cpu.reset()
cpu.reg_a = 0x00
cpu.reg_x = value
memory[0xFCE2] = 0x8A
cpu.execute(2)
assert (
cpu.program_counter,
cpu.stack_pointer,
cpu.cycles,
cpu.flag_n,
cpu.flag_z,
cpu.reg_a,
) == (0xFCE3, 0x01FD, 2, flag_n, flag_z, value)
| 30.823529
| 75
| 0.380725
|
"""
TXA - Transfer Register X to Accumulator.
A = X
Copies the current contents of the X register into the accumulator and sets
the zero and negative flags as appropriate.
Processor Status after use:
+------+-------------------+--------------------------+
| Flag | Description | State |
+======+===================+==========================+
| C | Carry Flag | Not affected |
+------+-------------------+--------------------------+
| Z    | Zero Flag         | Set if A = 0             |
+------+-------------------+--------------------------+
| I | Interrupt Disable | Not affected |
+------+-------------------+--------------------------+
| D | Decimal Mode Flag | Not affected |
+------+-------------------+--------------------------+
| B | Break Command | Not affected |
+------+-------------------+--------------------------+
| V | Overflow Flag | Not affected |
+------+-------------------+--------------------------+
| N | Negative Flag | Set if bit 7 of A is set |
+------+-------------------+--------------------------+
+-----------------+--------+-------+--------+
| Addressing Mode | Opcode | Bytes | Cycles |
+=================+========+=======+========+
| Implied | 0x8A | 1 | 2 |
+-----------------+--------+-------+--------+
See also: TAX
"""
import pytest
import m6502
@pytest.mark.parametrize(
"value, flag_n, flag_z", [
(0x0F, False, False),
(0x00, False, True),
(0xF0, True, False),
])
def test_cpu_ins_txa_imm(value: int, flag_n: bool, flag_z: bool) -> None:
"""
Transfer Accumulator, Implied.
return: None
"""
memory = m6502.Memory()
cpu = m6502.Processor(memory)
cpu.reset()
cpu.reg_a = 0x00
cpu.reg_x = value
memory[0xFCE2] = 0x8A
cpu.execute(2)
assert (
cpu.program_counter,
cpu.stack_pointer,
cpu.cycles,
cpu.flag_n,
cpu.flag_z,
cpu.reg_a,
) == (0xFCE3, 0x01FD, 2, flag_n, flag_z, value)
| 0
| 0
| 0
|
68da65c7b02b2e46698372cf6c71f0e9a96ad209
| 392
|
py
|
Python
|
HSCTF/crypto/Randomization 1/solve.py
|
deut-erium/WriteUps
|
36b4193f5fab9f95527a48626ecba631d5a03796
|
[
"MIT"
] | 11
|
2020-06-06T05:28:27.000Z
|
2022-01-09T00:42:49.000Z
|
2020/HSCTF/crypto/Randomization 1/solve.py
|
CSEA-IITB/WriteUps
|
46e7f36b0c4ef182cbaf375fd10fda954b6667a0
|
[
"MIT"
] | 1
|
2020-09-06T18:19:55.000Z
|
2020-09-06T18:19:55.000Z
|
HSCTF/crypto/Randomization 1/solve.py
|
deut-erium/WriteUps
|
36b4193f5fab9f95527a48626ecba631d5a03796
|
[
"MIT"
] | 6
|
2020-06-06T05:36:43.000Z
|
2021-08-11T10:17:18.000Z
|
import pwn
HOST, PORT = "crypto.hsctf.com", 6001
rem = pwn.remote(HOST, PORT)
rem.recvline()
data = rem.recvline()
initial = data.decode().strip().split(':')[-1]
print(initial)
initial = int(initial)
for i in range(10):
rem.sendline(str(nextval(initial)).encode())
print(rem.recvline().decode())
initial = nextval(initial)
| 19.6
| 48
| 0.668367
|
import pwn
HOST, PORT = "crypto.hsctf.com", 6001
rem = pwn.remote(HOST, PORT)
rem.recvline()
data = rem.recvline()
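# The server's "random" sequence is an 8-bit linear congruential step:
# next = (37 * current + 65) mod 256, i.e. (num*0x25 + 0x41) & 0xff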
def nextval(num):
return (num*0x25 + 0x41)&0xff
initial = data.decode().strip().split(':')[-1]
print(initial)
initial = int(initial)
for i in range(10):
rem.sendline(str(nextval(initial)).encode())
print(rem.recvline().decode())
initial = nextval(initial)
| 30
| 0
| 23
|
f43edadce6082236a716b23df8d2ddde42461d1b
| 4,086
|
py
|
Python
|
camply/utils/yaml_utils.py
|
juftin/camply
|
d365ed10a62248edc428a68a8ee96743c4b8fa98
|
[
"MIT"
] | 123
|
2021-05-19T04:56:47.000Z
|
2022-03-23T19:04:45.000Z
|
camply/utils/yaml_utils.py
|
juftin/camply
|
d365ed10a62248edc428a68a8ee96743c4b8fa98
|
[
"MIT"
] | 11
|
2021-05-25T20:22:14.000Z
|
2022-03-05T16:31:32.000Z
|
camply/utils/yaml_utils.py
|
juftin/camply
|
62d1a4423710a5e0f0366b5e9204b3639b358070
|
[
"MIT"
] | 21
|
2021-05-24T05:53:24.000Z
|
2022-03-31T02:03:41.000Z
|
#!/usr/bin/env python3
# Author:: Justin Flannery (mailto:juftin@juftin.com)
"""
YAML Utilities for Camply
"""
from datetime import datetime
import logging
import os
from pathlib import Path
from re import compile
from typing import Dict, Tuple
from yaml import load, SafeLoader
from camply.config import SearchConfig
from camply.containers import SearchWindow
logger = logging.getLogger(__name__)
def read_yml(path: str = None):
"""
Load a yaml configuration file_path (path) or data object (data)
and resolve any environment variables. The environment
variables must be in this format to be parsed: ${VAR_NAME}.
Parameters
----------
path: str
File Path of YAML Object to Read
Examples
----------
database:
host: ${HOST}
port: ${PORT}
${KEY}: ${VALUE}
app:
log_path: "/var/${LOG_PATH}"
something_else: "${AWESOME_ENV_VAR}/var/${A_SECOND_AWESOME_VAR}"
"""
path = os.path.abspath(path)
pattern = compile(r".*?\${(\w+)}.*?")
safe_loader = SafeLoader
safe_loader.add_implicit_resolver(tag=None, regexp=pattern, first=None)
def env_var_constructor(safe_loader: object, node: object):
"""
Extracts the environment variable from the node's value
:param yaml.Loader safe_loader: the yaml loader
:param node: the current node in the yaml
:return: the parsed string that contains the value of the environment
variable
"""
value = safe_loader.construct_scalar(node=node)
match = pattern.findall(string=value)
if match:
full_value = value
for item in match:
full_value = full_value.replace(
"${{{key}}}".format(key=item), os.getenv(key=item, default=item))
return full_value
return value
safe_loader.add_constructor(tag=None, constructor=env_var_constructor)
with open(path) as conf_data:
return load(stream=conf_data, Loader=safe_loader)
def yaml_file_to_arguments(file_path: str) -> Tuple[str, Dict[str, object], Dict[str, object]]:
"""
Convert YAML File into A Dictionary to be used as **kwargs
Parameters
----------
file_path: str
File Path to YAML
Returns
-------
provider, provider_kwargs, search_kwargs: Tuple[str, Dict[str, object], Dict[str, object]]
Tuple containing provider string, provider **kwargs, and search **kwargs
"""
yaml_search = read_yml(path=file_path)
logger.info(f"YML File Parsed: {Path(file_path).name}")
provider = yaml_search.get("provider", "RecreationDotGov")
start_date = datetime.strptime(str(yaml_search["start_date"]), "%Y-%m-%d")
end_date = datetime.strptime(str(yaml_search["end_date"]), "%Y-%m-%d")
nights = int(yaml_search.get("nights", 1))
recreation_area = yaml_search.get("recreation_area", None)
campgrounds = yaml_search.get("campgrounds", None)
weekends_only = yaml_search.get("weekends", False)
continuous = yaml_search.get("continuous", True)
polling_interval = yaml_search.get("polling_interval",
SearchConfig.RECOMMENDED_POLLING_INTERVAL)
notify_first_try = yaml_search.get("notify_first_try", False)
notification_provider = yaml_search.get("notifications", "silent")
search_forever = yaml_search.get("search_forever", False)
search_window = SearchWindow(start_date=start_date, end_date=end_date)
provider_kwargs = dict(search_window=search_window,
recreation_area=recreation_area,
campgrounds=campgrounds,
weekends_only=weekends_only,
nights=nights)
search_kwargs = dict(
log=True, verbose=True,
continuous=continuous,
polling_interval=polling_interval,
notify_first_try=notify_first_try,
notification_provider=notification_provider,
search_forever=search_forever)
return provider, provider_kwargs, search_kwargs
| 34.627119
| 95
| 0.659325
|
#!/usr/bin/env python3
# Author:: Justin Flannery (mailto:juftin@juftin.com)
"""
YAML Utilities for Camply
"""
from datetime import datetime
import logging
import os
from pathlib import Path
from re import compile
from typing import Dict, Tuple
from yaml import load, SafeLoader
from camply.config import SearchConfig
from camply.containers import SearchWindow
logger = logging.getLogger(__name__)
def read_yml(path: str = None):
"""
Load a yaml configuration file_path (path) or data object (data)
and resolve any environment variables. The environment
variables must be in this format to be parsed: ${VAR_NAME}.
Parameters
----------
path: str
File Path of YAML Object to Read
Examples
----------
database:
host: ${HOST}
port: ${PORT}
${KEY}: ${VALUE}
app:
log_path: "/var/${LOG_PATH}"
something_else: "${AWESOME_ENV_VAR}/var/${A_SECOND_AWESOME_VAR}"
"""
path = os.path.abspath(path)
pattern = compile(r".*?\${(\w+)}.*?")
safe_loader = SafeLoader
safe_loader.add_implicit_resolver(tag=None, regexp=pattern, first=None)
def env_var_constructor(safe_loader: object, node: object):
"""
Extracts the environment variable from the node's value
:param yaml.Loader safe_loader: the yaml loader
:param node: the current node in the yaml
:return: the parsed string that contains the value of the environment
variable
"""
value = safe_loader.construct_scalar(node=node)
match = pattern.findall(string=value)
if match:
full_value = value
for item in match:
full_value = full_value.replace(
"${{{key}}}".format(key=item), os.getenv(key=item, default=item))
return full_value
return value
safe_loader.add_constructor(tag=None, constructor=env_var_constructor)
with open(path) as conf_data:
return load(stream=conf_data, Loader=safe_loader)
def yaml_file_to_arguments(file_path: str) -> Tuple[str, Dict[str, object], Dict[str, object]]:
"""
Convert YAML File into A Dictionary to be used as **kwargs
Parameters
----------
file_path: str
File Path to YAML
Returns
-------
provider, provider_kwargs, search_kwargs: Tuple[str, Dict[str, object], Dict[str, object]]
Tuple containing provider string, provider **kwargs, and search **kwargs
"""
yaml_search = read_yml(path=file_path)
logger.info(f"YML File Parsed: {Path(file_path).name}")
provider = yaml_search.get("provider", "RecreationDotGov")
start_date = datetime.strptime(str(yaml_search["start_date"]), "%Y-%m-%d")
end_date = datetime.strptime(str(yaml_search["end_date"]), "%Y-%m-%d")
nights = int(yaml_search.get("nights", 1))
recreation_area = yaml_search.get("recreation_area", None)
campgrounds = yaml_search.get("campgrounds", None)
weekends_only = yaml_search.get("weekends", False)
continuous = yaml_search.get("continuous", True)
polling_interval = yaml_search.get("polling_interval",
SearchConfig.RECOMMENDED_POLLING_INTERVAL)
notify_first_try = yaml_search.get("notify_first_try", False)
notification_provider = yaml_search.get("notifications", "silent")
search_forever = yaml_search.get("search_forever", False)
search_window = SearchWindow(start_date=start_date, end_date=end_date)
provider_kwargs = dict(search_window=search_window,
recreation_area=recreation_area,
campgrounds=campgrounds,
weekends_only=weekends_only,
nights=nights)
search_kwargs = dict(
log=True, verbose=True,
continuous=continuous,
polling_interval=polling_interval,
notify_first_try=notify_first_try,
notification_provider=notification_provider,
search_forever=search_forever)
return provider, provider_kwargs, search_kwargs
| 0
| 0
| 0
|
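A short usage sketch for the yaml_file_to_arguments helper in the record above; the file name and key values are hypothetical, but the keys match the ones the function actually reads:

# campsite_search.yaml (hypothetical contents):
#   provider: RecreationDotGov
#   start_date: 2021-07-01
#   end_date: 2021-07-04
#   recreation_area: 2991
#   weekends: true
from camply.utils.yaml_utils import yaml_file_to_arguments

provider, provider_kwargs, search_kwargs = yaml_file_to_arguments(
    file_path="campsite_search.yaml")
print(provider)                            # "RecreationDotGov"
print(provider_kwargs["recreation_area"])  # 2991
print(search_kwargs["continuous"])         # True (the default when the key is absent)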
b31ed0506aad94a74db97c7798537089fe97130a
| 9,136
|
py
|
Python
|
TD/src/plots_nips2016.py
|
lucasgit/rl
|
1c4bbfad0b11c040ece2b9a384f3781de2c729ca
|
[
"MIT"
] | 1
|
2022-01-21T13:52:50.000Z
|
2022-01-21T13:52:50.000Z
|
TD/src/plots_nips2016.py
|
lucaslehnert/pgq
|
1c4bbfad0b11c040ece2b9a384f3781de2c729ca
|
[
"MIT"
] | null | null | null |
TD/src/plots_nips2016.py
|
lucaslehnert/pgq
|
1c4bbfad0b11c040ece2b9a384f3781de2c729ca
|
[
"MIT"
] | null | null | null |
'''
Created on May 23, 2016
@author: Lucas Lehnert (lucas.lehnert@mail.mcgill.ca)
Script to generate all plots from the NIPS 2016 paper.
'''
import matplotlib
matplotlib.use( 'agg' )
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
from util.numpy_json import loadJSONResults
experimentDir = '../data/'
plotDir = '../plot/'
if not os.path.exists( plotDir ):
os.makedirs( plotDir )
if __name__ == '__main__':
main()
| 38.225941
| 118
| 0.611208
|
'''
Created on May 23, 2016
@author: Lucas Lehnert (lucas.lehnert@mail.mcgill.ca)
Script to generate all plots from the NIPS 2016 paper.
'''
import matplotlib
matplotlib.use( 'agg' )
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
from util.numpy_json import loadJSONResults
experimentDir = '../data/'
plotDir = '../plot/'
if not os.path.exists( plotDir ):
os.makedirs( plotDir )
def loadResults( globPath ):
dataFiles = glob.glob( globPath )
res = map( lambda df : loadJSONResults( df ), dataFiles )
return res
def episodeMSPBE( resultDict ):
episodeLengthMat = resultDict['results']['mspbe']
episodeLengthMean = np.mean( episodeLengthMat, axis=0 )
episodeLengthStd = np.std( episodeLengthMat, axis=0 )
return episodeLengthMean, episodeLengthStd
def episodeQnorm( resultDict ):
episodeLengthMat = resultDict['results']['qnorm']
episodeLengthMean = np.mean( episodeLengthMat, axis=0 )
episodeLengthStd = np.std( episodeLengthMat, axis=0 )
return episodeLengthMean, episodeLengthStd
def episodeLength( resultDict ):
episodeLengthMat = resultDict['results']['episodeLength']
episodeLengthMean = np.mean( episodeLengthMat, axis=0 )
episodeLengthStd = np.std( episodeLengthMat, axis=0 )
return episodeLengthMean, episodeLengthStd
def concatenateExperiments( experimentList ):
exp = {}
exp['configuration'] = experimentList[0]['configuration']
exp['experiment'] = experimentList[0]['experiment']
exp['results'] = {}
exp['results']['episodeLength'] = np.array( map( lambda e: e['results']['episodeLength'][0], experimentList ) )
exp['results']['successfulRepeats'] = np.sum( map( lambda e: e['results']['successfulRepeats'], experimentList ) )
exp['results']['thetaNorm'] = np.array( map( lambda e: e['results']['thetaNorm'][0], experimentList ) )
return exp
def makeBairdPlots():
global experimentDir, plotDir
resQ = loadResults( experimentDir + 'baird/baird-sweeps-Q.json' )[0][0]
resGQ = loadResults( experimentDir + 'baird/baird-sweeps-GQ.json' )[0][0]
resPGQ = loadResults( experimentDir + 'baird/baird-sweeps-PGQ.json' )[0][0]
plt.figure( figsize=( 4, 2.8 ) )
stdInterval = 50
resQ['results']['mspbe'] = map( lambda r : r[:200], resQ['results']['mspbeDiv'] )
m, v = episodeMSPBE( resQ )
plt.plot( range( len( m ) ), m, 'k', label='Q', linewidth=2 )
plt.errorbar( range( len( m ) )[stdInterval::stdInterval], m[stdInterval::stdInterval], \
yerr=v[stdInterval::stdInterval], ecolor='k', fmt=None, linewidth=1.5 )
m, v = episodeMSPBE( resGQ )
plt.plot( range( len( m ) ), m, 'g', label='GQ', linewidth=2 )
plt.errorbar( range( len( m ) )[stdInterval::stdInterval], m[stdInterval::stdInterval], \
yerr=v[stdInterval::stdInterval], ecolor='g', fmt=None, linewidth=1.5 )
m, v = episodeMSPBE( resPGQ )
plt.plot( range( len( m ) ), m, 'b', label='PGQ', linewidth=2 )
plt.errorbar( range( len( m ) )[stdInterval::stdInterval], m[stdInterval::stdInterval], \
yerr=v[stdInterval::stdInterval], ecolor='b', fmt=None, linewidth=1.5 )
plt.legend()
plt.ylim( [0, 3000] )
plt.ylabel( 'MSPBE' )
plt.xlabel( 'Update' )
plt.gcf().tight_layout()
# plt.show()
plt.savefig( plotDir + '/bd_all_sweep_mspbe.pdf' )
plt.figure( figsize=( 4, 2.8 ) )
stdInterval = 50
resQ['results']['qnorm'] = map( lambda r : r[:200], resQ['results']['qnormDiv'] )
m, v = episodeQnorm( resQ )
plt.plot( range( len( m ) ), m, 'k', label='Q', linewidth=2 )
plt.errorbar( range( len( m ) )[stdInterval::stdInterval], m[stdInterval::stdInterval], \
yerr=v[stdInterval::stdInterval], ecolor='k', fmt=None, linewidth=1.5 )
m, v = episodeQnorm( resGQ )
plt.plot( range( len( m ) ), m, 'g', label='GQ', linewidth=2 )
plt.errorbar( range( len( m ) )[stdInterval::stdInterval], m[stdInterval::stdInterval], \
yerr=v[stdInterval::stdInterval], ecolor='g', fmt=None, linewidth=1.5 )
m, v = episodeQnorm( resPGQ )
plt.plot( range( len( m ) ), m, 'b', label='PGQ', linewidth=2 )
plt.errorbar( range( len( m ) )[stdInterval::stdInterval], m[stdInterval::stdInterval], \
yerr=v[stdInterval::stdInterval], ecolor='b', fmt=None, linewidth=1.5 )
plt.legend()
plt.ylim( [0, 30] )
plt.ylabel( '$|| \pmb{Q} ||_\infty$' )
plt.xlabel( 'Update' )
plt.gcf().tight_layout()
# plt.show()
plt.savefig( plotDir + '/bd_all_sweep_qnorm.pdf' )
def makeMountainCarPlots():
global experimentDir, plotDir
res = loadResults( experimentDir + 'mountaincar/mc_all_[0-9][0-9][0-9][0-9].json' )
plt.figure( figsize=( 5, 3.4 ) )
resQ = filter( lambda r : r['configuration']['agent'] == 'Q', res )[0]
resGQ = filter( lambda r : r['configuration']['agent'] == 'GQ', res )[0]
resPGQ = filter( lambda r : r['configuration']['agent'] == 'PGQ', res )[0]
m, v = episodeLength( resQ )
plt.plot( range( len( m ) ), m, 'k', label='Q' )
plt.errorbar( range( len( m ) )[5::5], m[5::5], yerr=v[5::5], ecolor='k', fmt=None )
m, v = episodeLength( resGQ )
plt.plot( range( len( m ) ), m, 'g', label='GQ' )
plt.errorbar( range( len( m ) )[5::5], m[5::5], yerr=v[5::5], ecolor='g', fmt=None )
m, v = episodeLength( resPGQ )
plt.plot( range( len( m ) ), m, 'b', label='PGQ' )
plt.errorbar( range( len( m ) )[5::5], m[5::5], yerr=v[5::5], ecolor='b', fmt=None, capthick=1.5 )
plt.legend()
plt.ylim( [0, 10500] )
plt.ylabel( 'Episode Length' )
plt.xlabel( 'Episode' )
plt.gcf().tight_layout()
# plt.show()
plt.savefig( plotDir + '/mc_all_episode_length.pdf' )
plt.figure( figsize=( 5, 3.4 ) )
resGQ = filter( lambda r : r['configuration']['agent'] == 'GQ', res )[0]
resPGQ = filter( lambda r : r['configuration']['agent'] == 'PGQ', res )[0]
m, v = episodeLength( resGQ )
plt.plot( range( len( m ) ), m, 'g', label='GQ' )
plt.errorbar( range( len( m ) )[5::5], m[5::5], yerr=v[5::5], ecolor='g', fmt=None )
m, v = episodeLength( resPGQ )
plt.plot( range( len( m ) ), m, 'b', label='PGQ' )
plt.errorbar( range( len( m ) )[5::5], m[5::5], yerr=v[5::5], ecolor='b', fmt=None, capthick=1.5 )
plt.legend()
plt.ylim( [0, 10500] )
plt.ylabel( 'Episode Length' )
plt.xlabel( 'Episode' )
# plt.show()
plt.gcf().tight_layout()
# plt.show()
plt.savefig( plotDir + '/mc_GQ_PGQ_episode_length.pdf' )
def makeAcrobotPlots():
global experimentDir, plotDir
res = loadResults( experimentDir + 'acrobot/ac_all_1000_[0-9][0-9][0-9][0-9].json' )
resQ = concatenateExperiments( filter( lambda e: e['configuration']['agent'] == 'Q', res ) )
resGQ = concatenateExperiments( filter( lambda e: e['configuration']['agent'] == 'GQ', res ) )
resPGQ = concatenateExperiments( filter( lambda e: e['configuration']['agent'] == 'PGQ', res ) )
plt.figure( figsize=( 5, 3.4 ) )
stdInt = 100
m, v = episodeLength( resQ )
plt.plot( range( len( m ) ), m, 'k', label='Q', alpha=0.4 )
plt.errorbar( range( len( m ) )[stdInt::stdInt], m[stdInt::stdInt], yerr=v[stdInt::stdInt],
ecolor='k', fmt=None, capthick=2.5 )
m, v = episodeLength( resGQ )
plt.plot( range( len( m ) ), m, 'g', label='GQ', alpha=0.6 )
plt.errorbar( range( len( m ) )[stdInt::stdInt], m[stdInt::stdInt], yerr=v[stdInt::stdInt],
ecolor='g', fmt=None, capthick=2.5 )
m, v = episodeLength( resPGQ )
plt.plot( range( len( m ) ), m, 'b', label='PGQ', alpha=0.6 )
plt.errorbar( range( len( m ) )[stdInt::stdInt], m[stdInt::stdInt], yerr=v[stdInt::stdInt],
ecolor='b', fmt=None, capthick=2.5 )
plt.legend( ncol=3 )
plt.ylim( [0, 1800] )
# plt.gca().set_yscale('log')
plt.ylabel( 'Episode Length' )
plt.xlabel( 'Episode' )
plt.gcf().tight_layout()
plt.show()
plt.savefig( plotDir + '/ac_all_1000_episode_length.pdf' )
plt.figure( figsize=( 5, 3.4 ) )
stdInt = 100
m, v = episodeLength( resGQ )
plt.plot( range( len( m ) ), m, 'g', label='GQ', alpha=0.6 )
plt.errorbar( range( len( m ) )[stdInt::stdInt], m[stdInt::stdInt], yerr=v[stdInt::stdInt],
ecolor='g', fmt=None, capthick=2.5 )
m, v = episodeLength( resPGQ )
plt.plot( range( len( m ) ), m, 'b', label='PGQ', alpha=0.6 )
plt.errorbar( range( len( m ) )[stdInt::stdInt], m[stdInt::stdInt], yerr=v[stdInt::stdInt],
ecolor='b', fmt=None, capthick=2.5 )
plt.legend( ncol=3 )
plt.ylim( [0, 1800] )
# plt.gca().set_yscale('log')
plt.ylabel( 'Episode Length' )
plt.xlabel( 'Episode' )
plt.gcf().tight_layout()
# plt.show()
plt.savefig( plotDir + '/ac_GQ_PGQ_1000_episode_length.pdf' )
def main():
makeBairdPlots()
makeMountainCarPlots()
makeAcrobotPlots()
return
if __name__ == '__main__':
main()
| 8,361
| 0
| 206
|
7685a9c20c104421e0db1cc9e039a8d431b744a3
| 852
|
py
|
Python
|
project/Support/Code/actions/_accounts/account_group/edit/basic.py
|
fael07/Blog-Django-with-CBV
|
269747b2e663a34b99acae6368db49c6ad37c2b8
|
[
"MIT"
] | null | null | null |
project/Support/Code/actions/_accounts/account_group/edit/basic.py
|
fael07/Blog-Django-with-CBV
|
269747b2e663a34b99acae6368db49c6ad37c2b8
|
[
"MIT"
] | null | null | null |
project/Support/Code/actions/_accounts/account_group/edit/basic.py
|
fael07/Blog-Django-with-CBV
|
269747b2e663a34b99acae6368db49c6ad37c2b8
|
[
"MIT"
] | null | null | null |
from Support.Code.actions.Support.utils.functions_dict import get_name
from django.utils.text import slugify
| 38.727273
| 97
| 0.681925
|
from Support.Code.actions.Support.utils.functions_dict import get_name
from django.utils.text import slugify
def save_user_basic_and_update_user_save(request):
user = request.user
name: str = request.POST.get('name')
new_file = request.FILES.get('photo')
if new_file:
user.photo = new_file
user.name = get_name(name)
new_slug = slugify(name)
if request.session['user_save']['data']['author']:
new_slug = f'{new_slug}-{user.id}'
user.slug = new_slug
user.save()
request.session['user_save']['data']['name'] = get_name(name)
request.session['user_save']['data']['slug'] = new_slug
request.session['user_save']['data']['photo_url'] = user.photo.url
request.session.save()
user.my_static_pages = {**user.my_static_pages, 'data': request.session['user_save']['data']}
user.save()
| 720
| 0
| 23
|
434db8b2f563775855ef95e274774e84780d15b1
| 5,444
|
py
|
Python
|
tests/aggregate/elements.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/aggregate/elements.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/aggregate/elements.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
# Standard Library Imports
from datetime import datetime
from typing import List
# Protean
from protean.core.aggregate import BaseAggregate
from protean.core.entity import BaseEntity
from protean.core.field.association import HasMany, HasOne, Reference
from protean.core.field.basic import Auto, DateTime, Integer, String, Text
from protean.core.repository import BaseRepository
# Aggregates to test Identity
# Aggregates to test Subclassing
# Aggregates to test Abstraction # START #
# Aggregates to test Abstraction # END #
# Aggregates to test Meta Info overriding # START #
# Aggregates to test Meta Info overriding # END #
# Aggregates to test associations # START #
# Aggregates to test associations # END #
| 25.203704
| 88
| 0.722998
|
# Standard Library Imports
from datetime import datetime
from typing import List
# Protean
from protean.core.aggregate import BaseAggregate
from protean.core.entity import BaseEntity
from protean.core.field.association import HasMany, HasOne, Reference
from protean.core.field.basic import Auto, DateTime, Integer, String, Text
from protean.core.repository import BaseRepository
class Role(BaseAggregate):
name = String(max_length=15, required=True)
created_on = DateTime(default=datetime.today())
class Person(BaseAggregate):
first_name = String(max_length=50, required=True)
last_name = String(max_length=50, required=True)
age = Integer(default=21)
class PersonRepository(BaseRepository):
def find_adults(self, age: int = 21) -> List[Person]:
pass # FIXME Implement filter method
# Aggregates to test Identity
class PersonAutoSSN(BaseAggregate):
ssn = Auto(identifier=True)
name = String(max_length=25)
class PersonExplicitID(BaseAggregate):
ssn = String(max_length=36, identifier=True)
name = String(max_length=25)
# Aggregates to test Subclassing
class SubclassRole(Role):
pass
# Aggregates to test Abstraction # START #
class AbstractRole(BaseAggregate):
foo = String(max_length=25)
class Meta:
abstract = True
class ConcreteRole(AbstractRole):
bar = String(max_length=25)
class FurtherAbstractRole(ConcreteRole):
foobar = String(max_length=25)
class Meta:
abstract = True
# Aggregates to test Abstraction # END #
# Aggregates to test Meta Info overriding # START #
class DbRole(BaseAggregate):
bar = String(max_length=25)
class Meta:
schema_name = "foosball"
class SqlRole(Role):
class Meta:
schema_name = "roles"
class DifferentDbRole(Role):
class Meta:
provider = "non-default"
class SqlDifferentDbRole(Role):
class Meta:
provider = "non-default-sql"
class OrderedRole(BaseAggregate):
bar = String(max_length=25)
class Meta:
order_by = "bar"
class OrderedRoleSubclass(Role):
bar = String(max_length=25)
class Meta:
order_by = "bar"
# Aggregates to test Meta Info overriding # END #
# Aggregates to test associations # START #
class Post(BaseAggregate):
content = Text(required=True)
comments = HasMany("tests.aggregate.elements.Comment")
author = Reference("tests.aggregate.elements.Author")
class PostVia(BaseAggregate):
content = Text(required=True)
comments = HasMany("tests.aggregate.elements.CommentVia", via="posting_id")
author = Reference("tests.aggregate.elements.Author")
class PostViaWithReference(BaseAggregate):
content = Text(required=True)
comments = HasMany(
"tests.aggregate.elements.CommentViaWithReference", via="posting_id"
)
author = Reference("tests.aggregate.elements.Author")
class Comment(BaseEntity):
content = Text()
added_on = DateTime()
post = Reference("tests.aggregate.elements.Post")
class Meta:
aggregate_cls = Post
class CommentVia(BaseEntity):
content = Text()
added_on = DateTime()
posting_id = String()
class Meta:
aggregate_cls = PostVia
class CommentViaWithReference(BaseEntity):
content = Text()
added_on = DateTime()
posting = Reference("tests.aggregate.elements.PostVia")
class Meta:
aggregate_cls = PostViaWithReference
class Account(BaseAggregate):
email = String(required=True, max_length=255, unique=True, identifier=True)
password = String(required=True, max_length=255)
username = String(max_length=255, unique=True)
author = HasOne("tests.aggregate.elements.Author")
class Author(BaseEntity):
first_name = String(required=True, max_length=25)
last_name = String(max_length=25)
posts = HasMany("tests.aggregate.elements.Post")
account = Reference("tests.aggregate.elements.Account")
class Meta:
aggregate_cls = Account
class AccountWithId(BaseAggregate):
email = String(required=True, max_length=255, unique=True)
password = String(required=True, max_length=255)
username = String(max_length=255, unique=True)
author = HasOne("tests.aggregate.elements.Author")
class AccountVia(BaseAggregate):
email = String(required=True, max_length=255, unique=True, identifier=True)
password = String(required=True, max_length=255)
username = String(max_length=255, unique=True)
profile = HasOne("tests.aggregate.elements.ProfileVia", via="account_email")
class AccountViaWithReference(BaseAggregate):
email = String(required=True, max_length=255, unique=True, identifier=True)
password = String(required=True, max_length=255)
username = String(max_length=255, unique=True)
profile = HasOne("tests.aggregate.elements.ProfileViaWithReference", via="ac_email")
class Profile(BaseAggregate):
about_me = Text()
account = Reference("tests.aggregate.elements.Account", via="username")
class ProfileWithAccountId(BaseAggregate):
about_me = Text()
account = Reference("tests.aggregate.elements.AccountWithId")
class ProfileVia(BaseAggregate):
profile_id = String(identifier=True)
about_me = Text()
account_email = String(max_length=255)
class ProfileViaWithReference(BaseAggregate):
about_me = Text()
ac = Reference("tests.aggregate.elements.AccountViaWithReference")
# Aggregates to test associations # END #
| 78
| 3,899
| 711
|
852e2170c0125511f1261888c3694601b5d7bab3
| 10,455
|
py
|
Python
|
bert_senteval.py
|
heartcored98/Trasnformer_Anatomy
|
2100f690947abe513d9e5fef9df0dd9e44e17a43
|
[
"MIT"
] | 16
|
2020-07-05T20:50:23.000Z
|
2021-04-26T20:13:27.000Z
|
bert_senteval.py
|
heartcored98/Trasnformer_Anatomy
|
2100f690947abe513d9e5fef9df0dd9e44e17a43
|
[
"MIT"
] | null | null | null |
bert_senteval.py
|
heartcored98/Trasnformer_Anatomy
|
2100f690947abe513d9e5fef9df0dd9e44e17a43
|
[
"MIT"
] | 3
|
2020-11-02T14:32:07.000Z
|
2021-12-15T13:20:15.000Z
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#%load_ext autoreload
#%autoreload 2
# In[ ]:
import sys
import torch
import numpy as np
import time
import hashlib
from os import listdir
from os.path import isfile, join
import pickle
import argparse
import json
from tqdm import tqdm
from copy import deepcopy
import os
from pytorch_pretrained_bert import BertTokenizer, BertModel
PATH_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data/'
PATH_TO_CACHE = './cache/'
sys.path.insert(0, PATH_SENTEVAL)
import senteval
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
# In[ ]:
def convert_sentences_to_features(sentences, seq_length, tokenizer):
"""Convert sentence into Tensor"""
num_sent = len(sentences)
input_type_ids = np.zeros((num_sent, seq_length), dtype=np.int32)
input_ids = np.zeros((num_sent, seq_length), dtype=np.int32)
input_mask = np.zeros((num_sent, seq_length), dtype=np.int32)
for idx, sent in enumerate(sentences):
tokens = tokenizer.tokenize(sent)
tokens = tokens[0:min((seq_length - 2), len(tokens))] # truncate tokens longer than seq_length
tokens.insert(0, "[CLS]")
tokens.append("[SEP]")
input_ids[idx,:len(tokens)] = np.array(tokenizer.convert_tokens_to_ids(tokens), dtype=np.int32)
input_mask[idx,:len(tokens)] = np.ones(len(tokens), dtype=np.int32)
assert len(input_ids[idx]) == seq_length
assert len(input_mask[idx]) == seq_length
assert len(input_type_ids[idx]) == seq_length
return input_ids, input_type_ids, input_mask
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
tasks = ['Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
parser = argparse.ArgumentParser(description='Evaluate BERT')
parser.add_argument("--device", type=list, default=[1,2])
parser.add_argument("--batch_size", type=int, default=500)
parser.add_argument("--nhid", type=int, default=0)
parser.add_argument("--kfold", type=int, default=5)
parser.add_argument("--usepytorch", type=bool, default=True)
parser.add_argument("--data_path", type=str, default='./SentEval/data/')
parser.add_argument("--cache_path", type=str, default='./cache/')
parser.add_argument("--result_path", type=str, default='./results/')
parser.add_argument("--optim", type=str, default='rmsprop')
parser.add_argument("--cbatch_size", type=int, default=512)
parser.add_argument("--tenacity", type=int, default=3)
parser.add_argument("--epoch_size", type=int, default=2)
parser.add_argument("--model_name", type=str, default='bert-base-uncased')
parser.add_argument("--task", type=int, default=0)
parser.add_argument("--layer", type=int, default=[0, 11])
parser.add_argument("--head", type=int, default=[-1, 11])
parser.add_argument("--head_size", type=int, default=64)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.device)
list_layer = range(args.layer[0], args.layer[1]+1) if len(args.layer) > 1 else [args.layer[0]]
list_head = range(args.head[0], args.head[1]+1) if len(args.head) > 1 else [args.head[0]]
num_exp = len(list(list_layer)) * len(list(list_head))
print("======= Benchmark Configuration ======")
print("Device: ", args.device)
print("model name: ", args.model_name)
print("Task: ", tasks[args.task])
print("range layer: ", list_layer)
print("range head: ", list_head)
print("Total Exps: ", num_exp)
print("======================================")
cnt = 0
target_task = tasks[args.task]
with tqdm(total=num_exp, file=sys.stdout) as pbar:
for layer in list_layer:
for head in list_head:
args.layer = layer
args.head = head
print()
experiment(args, target_task)
pbar.set_description('processed: %d' % (1 + cnt))
pbar.update(1)
cnt += 1
# In[ ]:
| 33.402556
| 190
| 0.616069
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#%load_ext autoreload
#%autoreload 2
# In[ ]:
import sys
import torch
import numpy as np
import time
import hashlib
from os import listdir
from os.path import isfile, join
import pickle
import argparse
import json
from tqdm import tqdm
from copy import deepcopy
import os
from pytorch_pretrained_bert import BertTokenizer, BertModel
PATH_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data/'
PATH_TO_CACHE = './cache/'
sys.path.insert(0, PATH_SENTEVAL)
import senteval
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
# In[ ]:
def convert_sentences_to_features(sentences, seq_length, tokenizer):
"""Convert sentence into Tensor"""
num_sent = len(sentences)
input_type_ids = np.zeros((num_sent, seq_length), dtype=np.int32)
input_ids = np.zeros((num_sent, seq_length), dtype=np.int32)
input_mask = np.zeros((num_sent, seq_length), dtype=np.int32)
for idx, sent in enumerate(sentences):
tokens = tokenizer.tokenize(sent)
tokens = tokens[0:min((seq_length - 2), len(tokens))] # truncate tokens longer than seq_length
tokens.insert(0, "[CLS]")
tokens.append("[SEP]")
input_ids[idx,:len(tokens)] = np.array(tokenizer.convert_tokens_to_ids(tokens), dtype=np.int32)
input_mask[idx,:len(tokens)] = np.ones(len(tokens), dtype=np.int32)
assert len(input_ids[idx]) == seq_length
assert len(input_mask[idx]) == seq_length
assert len(input_type_ids[idx]) == seq_length
return input_ids, input_type_ids, input_mask
# In[ ]:
def save_exp_result(exp_result):
exp_key = '{}_{}'.format(exp_result['layer'], exp_result['head'])
print(exp_key)
result_name = "{}_{}.json".format(exp_result['model_name'], exp_result['task'])
result_dir = exp_result['result_path']
onlyfiles = [f for f in listdir(result_dir) if isfile(join(result_dir, f))]
if result_name in onlyfiles:
with open(join(result_dir, result_name), 'r') as f:
results = json.load(f)
with open(join(result_dir, result_name), 'w') as f:
results[exp_key] = exp_result
json.dump(results, f)
print("Append exp result at {} with key {}".format(result_name, exp_key))
else:
results = {}
with open(join(result_dir, result_name), 'w') as f:
results[exp_key] = exp_result
json.dump(results, f)
print("Create new exp result at {} with key {}".format(result_name, exp_key))
# In[ ]:
def efficient_batcher(batch):
max_capacity = 3000
seq_length = max([len(tokens) for tokens in batch])
batch_size = len(batch)
mini_batch = max_capacity // seq_length + 1
return mini_batch
def prepare(params, samples):
cache_name = "{}_{}.pickle".format(params.model_name, params.current_task)
cache_dir = params.cache_path
onlyfiles = [f for f in listdir(cache_dir) if isfile(join(cache_dir, f))]
# ====== Look Up existing cache ====== #
if cache_name in onlyfiles:
print("cache found {}".format(cache_name))
with open(join(cache_dir, cache_name), 'rb') as f:
params['cache'] = pickle.load(f)
params['cache_flag'] = True
else:
print("cache not found. Construct BERT model")
params['cache'] = {}
params['cache_flag'] = False
# ====== Construct Model ====== #
model = BertModel.from_pretrained(args.model_name)
model = torch.nn.DataParallel(model)
tokenizer = BertTokenizer.from_pretrained(args.model_name, do_lower_case=True)
params['model'] = model
params['tokenizer'] = tokenizer
# ====== Initialize Counter ====== #
params['count'] = 0
def batcher(params, batch):
ts = time.time()
if params.cache_flag:
output = []
sentences = [' '.join(s) for s in batch]
for i, sent in enumerate(sentences):
hask_key = hashlib.sha256(sent.encode()).hexdigest()
output.append(params.cache[hask_key])
output = np.array(output)
else:
mini_batch_size = efficient_batcher(batch)
idx = 0
list_output = []
while idx < len(batch):
mini_batch = batch[idx:min(idx+mini_batch_size, len(batch))]
# ====== Token Preparation ====== #
params.model.eval()
seq_length = max([len(tokens) for tokens in mini_batch])
sentences = [' '.join(s) for s in mini_batch]
# ====== Convert to Tensor ====== #
input_ids, input_type_ids, input_mask = convert_sentences_to_features(sentences, seq_length, params.tokenizer)
input_ids = torch.Tensor(input_ids).long().cuda()
input_type_ids = torch.Tensor(input_type_ids).long().cuda()
input_mask = torch.Tensor(input_mask).long().cuda()
# ====== Encode Tokens ====== #
encoded_layers, _ = params.model(input_ids, input_type_ids, input_mask)
torch.cuda.synchronize()
output = np.array([layer[:, 0, :].detach().cpu().numpy() for layer in encoded_layers])
output = np.swapaxes(output, 0, 1)
list_output.append(output)
idx += mini_batch_size
# ====== Construct Cache ====== #
temp_cache = {}
for i, sent in enumerate(sentences):
hask_key = hashlib.sha256(sent.encode()).hexdigest()
temp_cache[hask_key] = output[i]
params.cache.update(temp_cache)
output = np.concatenate(list_output, 0)
te = time.time()
params.count += len(batch)
# ====== Extract Target Embedding (layer, head) ====== #
if params.head == -1:
embedding = output[:, params.layer, :]
else:
embedding = output[:, params.layer, params.head*params.head_size:(params.head+1)*params.head_size]
if params.count % 20000 == 0:
print('{:6}'.format(params.count), 'encoded result', output.shape, 'return result', embedding.shape, 'took', '{:2.3f}'.format(te-ts), 'process', '{:4.1f}'.format(len(batch)/(te-ts)))
return embedding
# In[ ]:
def experiment(args, task):
ts = time.time()
# ====== SentEval Engine Setting ====== #
params_senteval = {'task_path': args.data_path,
'usepytorch': args.usepytorch,
'seed': seed,
'batch_size': args.batch_size,
'nhid': args.nhid,
'kfold': args.kfold}
params_senteval['classifier'] = {'nhid': args.nhid, 'optim': args.optim, 'batch_size': args.cbatch_size,
'tenacity': args.tenacity, 'epoch_size': args.epoch_size}
# ====== Experiment Setting ====== #
params_senteval['model_name'] = args.model_name
params_senteval['cache_path'] = args.cache_path
params_senteval['result_path'] = args.result_path
params_senteval['layer'] = args.layer
params_senteval['head'] = args.head
params_senteval['head_size'] = args.head_size
# ====== Conduct Experiment ====== #
se = senteval.engine.SE(params_senteval, batcher, prepare)
result = se.eval([task])
# ====== Logging Experiment Result ====== #
exp_result = vars(deepcopy(args))
exp_result['task'] = task
exp_result['devacc'] = result[task]['devacc']
exp_result['acc'] = result[task]['acc']
save_exp_result(exp_result)
# ====== Save Cache ====== #
if not se.params.cache_flag:
cache_name = "{}_{}.pickle".format(se.params.model_name, se.params.current_task)
cache_dir = se.params.cache_path
with open(join(cache_dir, cache_name), 'wb') as f:
pickle.dump(se.params.cache, f, pickle.HIGHEST_PROTOCOL)
print("Saved cache {}".format(cache_name))
# ====== Reporting ====== #
te = time.time()
print("result: {}, took: {:3.1f} sec".format(result, te-ts))
# In[ ]:
tasks = ['Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
parser = argparse.ArgumentParser(description='Evaluate BERT')
parser.add_argument("--device", type=list, default=[1,2])
parser.add_argument("--batch_size", type=int, default=500)
parser.add_argument("--nhid", type=int, default=0)
parser.add_argument("--kfold", type=int, default=5)
parser.add_argument("--usepytorch", type=bool, default=True)
parser.add_argument("--data_path", type=str, default='./SentEval/data/')
parser.add_argument("--cache_path", type=str, default='./cache/')
parser.add_argument("--result_path", type=str, default='./results/')
parser.add_argument("--optim", type=str, default='rmsprop')
parser.add_argument("--cbatch_size", type=int, default=512)
parser.add_argument("--tenacity", type=int, default=3)
parser.add_argument("--epoch_size", type=int, default=2)
parser.add_argument("--model_name", type=str, default='bert-base-uncased')
parser.add_argument("--task", type=int, default=0)
parser.add_argument("--layer", type=int, default=[0, 11])
parser.add_argument("--head", type=int, default=[-1, 11])
parser.add_argument("--head_size", type=int, default=64)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.device)
list_layer = range(args.layer[0], args.layer[1]+1) if len(args.layer) > 1 else [args.layer[0]]
list_head = range(args.head[0], args.head[1]+1) if len(args.head) > 1 else [args.head[0]]
num_exp = len(list(list_layer)) * len(list(list_head))
print("======= Benchmark Configuration ======")
print("Device: ", args.device)
print("model name: ", args.model_name)
print("Task: ", tasks[args.task])
print("range layer: ", list_layer)
print("range head: ", list_head)
print("Total Exps: ", num_exp)
print("======================================")
cnt = 0
target_task = tasks[args.task]
with tqdm(total=num_exp, file=sys.stdout) as pbar:
for layer in list_layer:
for head in list_head:
args.layer = layer
args.head = head
print()
experiment(args, target_task)
pbar.set_description('processed: %d' % (1 + cnt))
pbar.update(1)
cnt += 1
# In[ ]:
| 6,340
| 0
| 115
|
0668d699300db3de6998a525c2564293163ee37d
| 2,963
|
py
|
Python
|
sensors_test.py
|
kipkemoimayor/Traffic_control_system
|
840c01b8c031d613524b87b91c8d938a33348b3c
|
[
"MIT"
] | 2
|
2019-06-15T09:58:08.000Z
|
2020-08-24T09:51:37.000Z
|
sensors_test.py
|
kipkemoimayor/Traffic_control_system
|
840c01b8c031d613524b87b91c8d938a33348b3c
|
[
"MIT"
] | null | null | null |
sensors_test.py
|
kipkemoimayor/Traffic_control_system
|
840c01b8c031d613524b87b91c8d938a33348b3c
|
[
"MIT"
] | 2
|
2019-06-26T07:30:05.000Z
|
2020-08-24T09:51:42.000Z
|
import RPi.GPIO as GPIO
import time
dis=0
while True:
# Setup triggers and Echos of all sensors
GPIO.setmode(GPIO.BOARD)
TRIG=11
ECHO=13
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(3,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
GPIO.setup(5,GPIO.IN)
GPIO.setup(35,GPIO.OUT)
GPIO.setup(31,GPIO.OUT)
GPIO.setup(33,GPIO.IN)
GPIO.setup(29,GPIO.IN)
GPIO.setup(38,GPIO.OUT)
GPIO.setup(19,GPIO.OUT)
GPIO.setup(23,GPIO.IN)
GPIO.setup(21,GPIO.IN)
station1()
a=station1()
print(str(a)+'Station 1 OK')
station2()
b=station2()
print(str(b)+'Station 2 OK')
station4()
d=station4()
print(str(d)+'Station 4 OK')
station3()
c=station3()
print(str(c)+'Station 3 OK')
station6()
f=station6()
print(str(f)+'Station 6 OK')
station5()
e=station5()
print(str(e)+'Station 5 OK')
| 20.576389
| 45
| 0.552143
|
import RPi.GPIO as GPIO
import time
dis=0
while True:
# Setup triggers and Echos of all sensors
GPIO.setmode(GPIO.BOARD)
TRIG=11
ECHO=13
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(3,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
GPIO.setup(5,GPIO.IN)
GPIO.setup(35,GPIO.OUT)
GPIO.setup(31,GPIO.OUT)
GPIO.setup(33,GPIO.IN)
GPIO.setup(29,GPIO.IN)
GPIO.setup(38,GPIO.OUT)
GPIO.setup(19,GPIO.OUT)
GPIO.setup(23,GPIO.IN)
GPIO.setup(21,GPIO.IN)
def station1():
GPIO.output(TRIG,True)
time.sleep(1)
GPIO.output(TRIG,False)
while GPIO.input(ECHO)==False:
start=time.time()
while GPIO.input(ECHO)==True:
end=time.time()
sig_time=end-start
distance=sig_time/0.000058
dis=round(distance,0)
return dis
station1()
a=station1()
print(str(a)+'Station 1 OK')
def station2():
GPIO.output(38,True)
time.sleep(1)
GPIO.output(38,False)
while GPIO.input(23)==False:
start=time.time()
while GPIO.input(23)==True:
end=time.time()
sig_time=end-start
distance=sig_time/0.000058
dis=round(distance,0)
return dis
station2()
b=station2()
print(str(b)+'Station 2 OK')
def station4():
GPIO.output(3,True)
time.sleep(1)
GPIO.output(3,False)
while GPIO.input(5)==0:
start=time.time()
while GPIO.input(5)==1:
end=time.time()
sig_time=end-start
distance=sig_time/0.000058
dis=round(distance,0)
return dis
station4()
d=station4()
print(str(d)+'Station 4 OK')
def station3():
GPIO.output(31,True)
time.sleep(1)
GPIO.output(31,False)
while GPIO.input(29)==False:
start=time.time()
while GPIO.input(29)==True:
end=time.time()
sig_time=end-start
distance=sig_time/0.000058
dis=round(distance,0)
return dis
station3()
c=station3()
print(str(c)+'Station 3 OK')
def station6():
GPIO.output(35,True)
time.sleep(1)
GPIO.output(35,False)
while GPIO.input(33)==False:
start=time.time()
while GPIO.input(33)==True:
end=time.time()
sig_time=end-start
distance=sig_time/0.000058
dis=round(distance,0)
return dis
station6()
f=station6()
print(str(f)+'Station 6 OK')
def station5():
GPIO.output(19,True)
time.sleep(1)
GPIO.output(19,False)
while GPIO.input(21)==False:
start=time.time()
while GPIO.input(21)==True:
end=time.time()
sig_time=end-start
distance=sig_time/0.000058
dis=round(distance,0)
return dis
station5()
e=station5()
print(str(e)+'Station 5 OK')
| 1,922
| 0
| 162
|
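The six station*() helpers in the record above differ only in their trigger and echo pins, so one parameterised function covers them all; this is a sketch rather than the project's code, it assumes the GPIO.setmode/GPIO.setup calls from the original loop have already run, and it keeps the original 0.000058 s-per-cm conversion and pin pairs:

import time
import RPi.GPIO as GPIO

def read_distance(trig_pin, echo_pin):
    # pulse the trigger, time the echo, convert the round trip to centimetres
    GPIO.output(trig_pin, True)
    time.sleep(1)
    GPIO.output(trig_pin, False)
    start = end = time.time()
    while not GPIO.input(echo_pin):
        start = time.time()
    while GPIO.input(echo_pin):
        end = time.time()
    return round((end - start) / 0.000058, 0)

# trigger/echo pairs taken from station1()..station6() in the original script
STATIONS = [(11, 13), (38, 23), (31, 29), (3, 5), (19, 21), (35, 33)]
for n, (trig, echo) in enumerate(STATIONS, start=1):
    print('{} Station {} OK'.format(read_distance(trig, echo), n))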
d7a3717a0624d7d45c29409cb86a2357bad037fd
| 2,829
|
py
|
Python
|
xbrlparser.py
|
emhlaos/bmv-scrapper
|
70df08cddae4c2b3e472c3c22e639fca07a14c86
|
[
"MIT"
] | 4
|
2018-03-01T03:22:45.000Z
|
2021-09-25T02:44:51.000Z
|
xbrlparser.py
|
emhlaos/bmv-scrapper
|
70df08cddae4c2b3e472c3c22e639fca07a14c86
|
[
"MIT"
] | null | null | null |
xbrlparser.py
|
emhlaos/bmv-scrapper
|
70df08cddae4c2b3e472c3c22e639fca07a14c86
|
[
"MIT"
] | 3
|
2020-04-22T15:10:29.000Z
|
2021-06-23T03:45:08.000Z
|
"""
Copyright C.C.:
Emiliano Hernandez Laos
https://github.com/emhlaos/
28/02/2018
"""
from urllib.request import urlopen
import os
from io import BytesIO
from zipfile import ZipFile
#LOAD FUNCTION:
currentdirectory = os.getcwd()
xbrldirectory = currentdirectory+"/xbrl"
if not os.path.exists(xbrldirectory): os.makedirs(xbrldirectory)
db = open(currentdirectory+"/babycaw.txt","r").read()
matrix = {}
rows = db.split("\n")
matrix["R.TIME"] = {}
n = 0
for t in rows[0].split(",")[1:]:
matrix["R.TIME"][n] = t
n=n+1
print(rows[1]," $$ ",rows[1].split(","))
for row in rows[1:]:
columns = row.split(",")
ticker = columns[0]
matrix[ticker]={}
n=0
for cell in columns[1:]:
matrix[ticker][n] = cell
n=n+1
#DOWNLOAD INFO:
revenue_matrix = matrix
allread = []
stocks = list(matrix.keys())[1:]
n=len(list(matrix["R.TIME"].keys()))
print(n,"\n",stocks)
for stock in stocks:
allread.append(stock)
for m in range(n):
print("Reading about "+stock)
if ".zip" in matrix[stock][m]:
with urlopen(matrix[stock][m]) as pzip:
with ZipFile(BytesIO(pzip.read())) as zp:
for file in zp.namelist():
print(file)
print("Dowloading: "+ stock + "_" + matrix["R.TIME"][m] + ".json")
try:
pjson = open(xbrldirectory+"/" + stock + "_" + matrix["R.TIME"][m] + ".json", "wb")
pjson.write(zp.read(file))
pjson.close()
except Exception as args:
print(args,"you got {}%".format(len(allread)/n))
teencow = open(currentdirectory+"/teencaw.txt", "w")
for riadboe in allread:
teencow.write(riadboe,"\n")
allread=[]
elif ".json" in matrix[stock][m]:
jsonurl = matrix[stock][m]
jsonresp = urlopen(jsonurl)
with urlopen(matrix[stock][m]) as pjson:
try:
print("Downloading",stock + "_" + matrix["R.TIME"][m] + ".json")
tempjson = open(xbrldirectory+"/" + stock + "_" + matrix["R.TIME"][m] + ".json", "wb")
tempjson.write(pjson.read())
tempjson.close()
except Exception as args:
print(args, "you got {}%".format(len(allread) / n),"ending at a json file JSUUN")
teencow = open(currentdirectory+"/teencaw.txt", "w")
for riadboe in allread:
teencow.write(riadboe + "\n")
allread = []
| 36.74026
| 112
| 0.487098
|
"""
Copyright C.C.:
Emiliano Hernandez Laos
https://github.com/emhlaos/
28/02/2018
"""
from urllib.request import urlopen
import os
from io import BytesIO
from zipfile import ZipFile
#LOAD FUNCTION:
currentdirectory = os.getcwd()
xbrldirectory = currentdirectory+"/xbrl"
if not os.path.exists(xbrldirectory): os.makedirs(xbrldirectory)
db = open(currentdirectory+"/babycaw.txt","r").read()
matrix = {}
rows = db.split("\n")
matrix["R.TIME"] = {}
n = 0
for t in rows[0].split(",")[1:]:
matrix["R.TIME"][n] = t
n=n+1
print(rows[1]," $$ ",rows[1].split(","))
for row in rows[1:]:
columns = row.split(",")
ticker = columns[0]
matrix[ticker]={}
n=0
for cell in columns[1:]:
matrix[ticker][n] = cell
n=n+1
#DOWNLOAD INFO:
revenue_matrix = matrix
allread = []
stocks = list(matrix.keys())[1:]
n=len(list(matrix["R.TIME"].keys()))
print(n,"\n",stocks)
for stock in stocks:
allread.append(stock)
for m in range(n):
print("Reading about "+stock)
if ".zip" in matrix[stock][m]:
with urlopen(matrix[stock][m]) as pzip:
with ZipFile(BytesIO(pzip.read())) as zp:
for file in zp.namelist():
print(file)
print("Dowloading: "+ stock + "_" + matrix["R.TIME"][m] + ".json")
try:
pjson = open(xbrldirectory+"/" + stock + "_" + matrix["R.TIME"][m] + ".json", "wb")
pjson.write(zp.read(file))
pjson.close()
except Exception as args:
print(args,"you got {}%".format(len(allread)/n))
teencow = open(currentdirectory+"/teencaw.txt", "w")
for riadboe in allread:
teencow.write(riadboe,"\n")
allread=[]
elif ".json" in matrix[stock][m]:
jsonurl = matrix[stock][m]
jsonresp = urlopen(jsonurl)
with urlopen(matrix[stock][m]) as pjson:
try:
print("Downloading",stock + "_" + matrix["R.TIME"][m] + ".json")
tempjson = open(xbrldirectory+"/" + stock + "_" + matrix["R.TIME"][m] + ".json", "wb")
tempjson.write(pjson.read())
tempjson.close()
except Exception as args:
print(args, "you got {}%".format(len(allread) / n),"ending at a json file JSUUN")
teencow = open(currentdirectory+"/teencaw.txt", "w")
for riadboe in allread:
teencow.write(riadboe + "\n")
allread = []
| 0
| 0
| 0
|
b56ad7a7bc1fe9805d9331eef23690d49bce762e
| 2,351
|
py
|
Python
|
hpedockerplugin/cmd/cmd_createshare.py
|
renovate-bot/python-hpedockerplugin
|
b7fa6b3193fa6dd42574585b4c621ff6a16babc9
|
[
"Apache-2.0"
] | 49
|
2016-06-14T22:25:40.000Z
|
2021-04-05T05:00:59.000Z
|
hpedockerplugin/cmd/cmd_createshare.py
|
imran-ansari/python-hpedockerplugin
|
e2726f48ac793dc894100e3772c40ce89bfe9bb8
|
[
"Apache-2.0"
] | 550
|
2016-07-25T12:01:12.000Z
|
2021-11-15T17:52:40.000Z
|
hpedockerplugin/cmd/cmd_createshare.py
|
imran-ansari/python-hpedockerplugin
|
e2726f48ac793dc894100e3772c40ce89bfe9bb8
|
[
"Apache-2.0"
] | 96
|
2016-06-01T22:07:03.000Z
|
2021-06-22T09:05:05.000Z
|
import six
from oslo_log import log as logging
from hpedockerplugin.cmd import cmd
from hpedockerplugin import exception
LOG = logging.getLogger(__name__)
| 40.534483
| 78
| 0.633348
|
import six
from oslo_log import log as logging
from hpedockerplugin.cmd import cmd
from hpedockerplugin import exception
LOG = logging.getLogger(__name__)
class CreateShareCmd(cmd.Cmd):
def __init__(self, file_mgr, share_args):
self._file_mgr = file_mgr
self._etcd = file_mgr.get_etcd()
self._fp_etcd = file_mgr.get_file_etcd()
self._mediator = file_mgr.get_mediator()
self._config = file_mgr.get_config()
self._backend = file_mgr.get_backend()
self._share_args = share_args
self._status = 'CREATING'
self._share_created_at_backend = False
self._share_created_in_etcd = False
def unexecute(self):
share_name = self._share_args['name']
LOG.info("cmd::unexecute: Removing share entry from ETCD: %s" %
share_name)
# Leaving the share entry in ETCD intact so that user can inspect
# the share and look for the reason of failure. Moreover, Docker
# daemon has the entry for this share as we returned success on the
# main thread. So it would be better that the user removes this failed
# share explicitly so that Docker daemon also updates its database
if self._share_created_at_backend:
LOG.info("CreateShareCmd:Undo Deleting share from backend: %s"
% share_name)
self._mediator.delete_share(self._share_args['id'])
LOG.info("CreateShareCmd:Undo Deleting fstore from backend: %s"
% share_name)
self._mediator.delete_file_store(self._share_args['fpg'],
share_name)
def execute(self):
share_name = self._share_args['name']
try:
LOG.info("Creating share %s on the backend" % share_name)
share_id = self._mediator.create_share(self._share_args)
self._share_created_at_backend = True
self._share_args['id'] = share_id
self._etcd.save_share(self._share_args)
self._share_created_in_etcd = True
except Exception as ex:
msg = "Share creation failed [share_name: %s, error: %s" %\
(share_name, six.text_type(ex))
LOG.error(msg)
self.unexecute()
raise exception.ShareCreationFailed(msg)
| 2,080
| 9
| 103
|
fd444aca0dbc53e3b47c5e154be553cc2f88d847
| 156
|
py
|
Python
|
tests/asp/cautious/sum.example11.cautious.asp.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/asp/cautious/sum.example11.cautious.asp.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/asp/cautious/sum.example11.cautious.asp.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
1 2 3 2 3 4 5
1 3 3 2 2 4 5
1 4 3 2 2 3 5
1 5 0 0
5 6 4 3 0 2 3 4 2 2 2
1 1 1 1 6
0
4 c
3 b
2 a
0
B+
0
B-
1
0
1
"""
output = """
INCOHERENT
"""
| 6.782609
| 21
| 0.487179
|
input = """
1 2 3 2 3 4 5
1 3 3 2 2 4 5
1 4 3 2 2 3 5
1 5 0 0
5 6 4 3 0 2 3 4 2 2 2
1 1 1 1 6
0
4 c
3 b
2 a
0
B+
0
B-
1
0
1
"""
output = """
INCOHERENT
"""
| 0
| 0
| 0
|
a7a70d0fc999c0ac172fafb16600e4829015a6d0
| 1,660
|
py
|
Python
|
code4step2/register_images.py
|
yukeyi/MCDS-Capstone
|
f7ce48fc5d3f5f96c1f29556585ed2338683c7d2
|
[
"MIT"
] | null | null | null |
code4step2/register_images.py
|
yukeyi/MCDS-Capstone
|
f7ce48fc5d3f5f96c1f29556585ed2338683c7d2
|
[
"MIT"
] | null | null | null |
code4step2/register_images.py
|
yukeyi/MCDS-Capstone
|
f7ce48fc5d3f5f96c1f29556585ed2338683c7d2
|
[
"MIT"
] | null | null | null |
import os
import shutil
import pickle as pkl
import numpy as np
import SimpleITK as sitk
from data_registration import RegHearts
LOAD_DIR = '/pylon5/ac5616p/Data/HeartSegmentationProject/CAP_challenge/CAP_challenge_training_set/test2/brain/total/'
'''
Generator function to get one pair of fixed and moving image at a time
(fixed, moving) are viewed as without order.
(a, b) is the same as (b, a), so (b, a) won't be registered
'''
'''
Register two images and save the resulting transform parameter map to a pickle file.
'''
if __name__ == '__main__':
main()
| 31.923077
| 118
| 0.655422
|
import os
import shutil
import pickle as pkl
import numpy as np
import SimpleITK as sitk
from data_registration import RegHearts
LOAD_DIR = '/pylon5/ac5616p/Data/HeartSegmentationProject/CAP_challenge/CAP_challenge_training_set/test2/brain/total/'
'''
Generator function to get one pair of fixed and moving image at a time
(fixed, moving) are viewed as without order.
(a, b) is the same as (b, a), so (b, a) won't be registered
'''
def get_pair():
patient_folders = os.listdir(LOAD_DIR)
for i in range(len(patient_folders)):
for j in range(i+1, len(patient_folders)):
fixed = patient_folders[i]
moving = patient_folders[j]
yield fixed, moving
'''
Register two images and save the resulting transform parameter map to a pickle file.
'''
def main():
error = []
for fixed_patient, moving_patient in get_pair():
reg = RegHearts(LOAD_DIR+fixed_patient+'/SA', LOAD_DIR+moving_patient+'/SA')
reg.gen_param_map()
try:
reg.register_imgs()
except:
error += [fixed_patient + ',' + moving_patient]
# make a new directory for storing transform parameter files wrt each moving patient
try:
os.mkdir(LOAD_DIR+fixed_patient+'/'+moving_patient)
except OSError:
pass
my_map = reg.elastixImageFilter.GetTransformParameterMap()
f = open(os.path.join(LOAD_DIR+fixed_patient, moving_patient, 'transform_map.pkl'), 'wb')
pkl.dump(my_map, f, 2) # this saves a python object to a pickle file
with open('pairs_not_registered.csv', 'w') as f:
for item in error:
f.write("%s\n" % item)
if __name__ == '__main__':
main()
| 1,109
| 0
| 44
|
d2dda46de2a802ec9a1d557c8bb1545bff13d2d7
| 4,235
|
py
|
Python
|
octoprint_LCD1602/__init__.py
|
Marien1993/LCD-octoprint
|
eae8fd9b2ab5a5799b25b6f0684b081c77cc0aad
|
[
"MIT"
] | null | null | null |
octoprint_LCD1602/__init__.py
|
Marien1993/LCD-octoprint
|
eae8fd9b2ab5a5799b25b6f0684b081c77cc0aad
|
[
"MIT"
] | null | null | null |
octoprint_LCD1602/__init__.py
|
Marien1993/LCD-octoprint
|
eae8fd9b2ab5a5799b25b6f0684b081c77cc0aad
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
LCD1602 Plugin for Octoprint
"""
from __future__ import absolute_import
from octoprint.printer.estimation import PrintTimeEstimator
import octoprint.plugin
import octoprint.events
from RPLCD.i2c import CharLCD
import time
import datetime
import os
import sys
from fake_rpi import printf
import fake_rpi
__plugin_name__ = "LCD1602 I2c display"
| 27.147436
| 120
| 0.651948
|
# coding=utf-8
"""
LCD1602 Plugin for Octoprint
"""
from __future__ import absolute_import
from octoprint.printer.estimation import PrintTimeEstimator
import octoprint.plugin
import octoprint.events
from RPLCD.i2c import CharLCD
import time
import datetime
import os
import sys
from fake_rpi import printf
import fake_rpi
class LCD1602Plugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.EventHandlerPlugin,
octoprint.plugin.ProgressPlugin):
def __init__(self):
if (os.getenv('LCD1602_DOCKER')):
print('We are running in test environnement, no i2c device attached.')
try:
print('Loading fake_rpi instead of smbus2')
sys.modules['smbus2'] = fake_rpi.smbus
self.mylcd = fake_rpi.smbus.SMBus(1)
except:
print('Cannot load fake_rpi !')
else:
self.mylcd = CharLCD(i2c_expander='PCF8574', address=0x27, cols=16, rows=4, backlight_enabled=True, charmap='A00')
# create block for progress bar
self.block = bytearray(b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF')
self.block.append(255)
self.mylcd.create_char(1,self.block)
# init vars
self.start_date = 0
# create block for progress bar
#self.mylcd.create_char(1,self.block)
def JobIsDone(self,lcd):
# create final anim
self.birdy = [ '^_-']
for pos in range(0,13):
lcd.cursor_pos = (1,pos)
lcd.write_string(self.birdy[0])  # birdy holds a single frame; slide it across the row
time.sleep(0.5)
lcd.clear()
lcd.write_string('Printer is gereed')
def on_after_startup(self):
mylcd = self.mylcd
self._logger.info("plugin initialized !")
def on_print_progress(self,storage,path,progress):
mylcd = self.mylcd
percent = int(progress/6.25)+1
completed = '\x01'*percent
mylcd.clear()
mylcd.write_string('Completed: '+str(progress)+'%')
mylcd.cursor_pos = (1,0)
mylcd.write_string(completed)
if progress==1 :
self.start_date=time.time()
if progress>10 and progress<100:
now=time.time()
elapsed=now-self.start_date
average=elapsed/(progress-1)
remaining=int((100-progress)*average)
remaining=str(datetime.timedelta(seconds=remaining))
mylcd.cursor_pos = (1,3)
mylcd.write_string(remaining)
if progress==100 :
self.JobIsDone(mylcd)
def on_event(self,event,payload):
mylcd = self.mylcd
if event in "Connected":
mylcd.clear()
mylcd.write_string('Verbonden met:')
mylcd.cursor_pos = (1,0)
mylcd.write_string(payload["port"])
if event in "Shutdown":
mylcd.clear()
mylcd.write_string('Tot snel!')
time.sleep(1)
mylcd._set_backlight_enabled(False)
mylcd.close()
if event in "PrinterStateChanged":
if payload["state_string"] in "Offline":
mylcd.clear()
mylcd.write_string('Octoprint is niet verbonden')
time.sleep(2)
mylcd.clear()
mylcd.write_string('saving a polar bear, eco mode ON')
time.sleep(5)
mylcd._set_backlight_enabled(False)
if payload["state_string"] in "Operational":
mylcd._set_backlight_enabled(True)
mylcd.clear()
mylcd.write_string('Printer is bezig')
if payload["state_string"] in "Cancelling":
mylcd.clear()
mylcd.write_string('Printeropdracht is afgebroken')
time.sleep(0.2)
if payload["state_string"] in "PrintCancelled":
mylcd.clear()
time.sleep(0.5)
mylcd.write_string(' Print opdracht is beeindicht ' )
time.sleep(2)
if payload["state_string"] in "Paused":
mylcd.clear()
time.sleep(0.5)
mylcd.write_string('Printer is gepauzeert')
if payload["state_string"] in "Resuming":
mylcd.clear()
mylcd.write_string('Printer gaat verder met printopdracht')
time.sleep(0.2)
__plugin_name__ = "LCD1602 I2c display"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = LCD1602Plugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
| 3,542
| 141
| 179
|
f282ae2d0aacafe56cfc3e396879274fc628595a
| 4,876
|
py
|
Python
|
assignment2/PythonApplication1.py
|
JungChaeMoon/room_escape
|
1417caf6b9cc6228b2f1faf533d73c986cc04704
|
[
"MIT"
] | null | null | null |
assignment2/PythonApplication1.py
|
JungChaeMoon/room_escape
|
1417caf6b9cc6228b2f1faf533d73c986cc04704
|
[
"MIT"
] | null | null | null |
assignment2/PythonApplication1.py
|
JungChaeMoon/room_escape
|
1417caf6b9cc6228b2f1faf533d73c986cc04704
|
[
"MIT"
] | null | null | null |
from bangtal import *
import random
import copy
import time
setGameOption(GameOption.INVENTORY_BUTTON, False)
setGameOption(GameOption.MESSAGE_BOX_BUTTON, False)
main_scene = Scene("퍼즐게임", "images/backgroud.PNG")
scene1 = Scene("Loopy 퍼즐", "images/backgroud.PNG")
scene2 = Scene("Lion 퍼즐", "images/backgroud.PNG")
help_message = showMessage("퍼즐 맞출 이미지를 클릭해주세요!!")
images = (
Object('images/loopy.jpg'),
Object('images/lion.jpg'),
Object('images/exit_button.png'),
Object('images/score.jpg'),
Object('images/another.jpg')
)
loopy_image = images[0]
loopy_image.locate(main_scene, 150, 50)
loopy_image.setScale(1.64)
loopy_image.show()
lion_image = images[1]
lion_image.locate(main_scene, 650, 50)
lion_image.setScale(0.7)
lion_image.show()
exit_button = images[2]
exit_button.locate(main_scene, 1150, 650)
exit_button.setScale(0.1)
exit_button.show()
blank = 8
game_board = []
init_board = []
start = 0
max_time = 987654321
loopy_max_score = 0
lion_max_score = 0
delta = [-1, 1, -3, 3]
Object.onMouseActionDefault = onMouseAction_piece
| 14.426036
| 209
| 0.561116
|
from bangtal import *
import random
import copy
import time
setGameOption(GameOption.INVENTORY_BUTTON, False)
setGameOption(GameOption.MESSAGE_BOX_BUTTON, False)
main_scene = Scene("퍼즐게임", "images/backgroud.PNG")
scene1 = Scene("Loopy 퍼즐", "images/backgroud.PNG")
scene2 = Scene("Lion 퍼즐", "images/backgroud.PNG")
help_message = showMessage("퍼즐 맞출 이미지를 클릭해주세요!!")
images = (
Object('images/loopy.jpg'),
Object('images/lion.jpg'),
Object('images/exit_button.png'),
Object('images/score.jpg'),
Object('images/another.jpg')
)
loopy_image = images[0]
loopy_image.locate(main_scene, 150, 50)
loopy_image.setScale(1.64)
loopy_image.show()
lion_image = images[1]
lion_image.locate(main_scene, 650, 50)
lion_image.setScale(0.7)
lion_image.show()
exit_button = images[2]
exit_button.locate(main_scene, 1150, 650)
exit_button.setScale(0.1)
exit_button.show()
blank = 8
game_board = []
init_board = []
start = 0
max_time = 987654321
loopy_max_score = 0
lion_max_score = 0
def hide_image():
loopy_image.hide()
lion_image.hide()
exit_button.hide()
def exit_on_mouse_action(x, y, action):
exit(0)
def loopy_on_mouse_action(x, y, action):
global game_board, init_board, start, images
start = 0
start = time.time()
hide_image()
game_board = []
init_board = []
    for index in range(9):  # 3x3 board -> 9 pieces (indices 0-8)
piece = Object("images/loopy_" + str(index + 1) + ".jpg" )
piece.locate(scene1, 300 + 150 * (index % 3), 460 - 150 * (index // 3))
piece.setScale(0.61)
piece.show()
game_board.append(piece)
init_board.append(piece)
game_board[blank].hide()
for _ in range(3):
random_move(scene1)
# timer.onTimeout = onTimeout
# timer.start()
startGame(scene1)
def lion_on_mouse_action(x, y, action):
hide_image()
global game_board, init_board, start
start = 0
start = time.time()
game_board = []
init_board = []
    for index in range(9):  # 3x3 board -> 9 pieces (indices 0-8)
piece = Object("images/lion_" + str(index + 1) + ".jpg" )
piece.locate(scene2, 300 + 150 * (index % 3), 460 - 150 * (index // 3))
piece.setScale(0.7)
piece.show()
game_board.append(piece)
init_board.append(piece)
game_board[blank].hide()
for _ in range(3):
random_move(scene2)
startGame(scene2)
def find_index(object):
global game_board
for index in range(9):
if game_board[index] == object: return index
def movable(index):
global blank
if index < 0: return False
if index > 8: return False
if index % 3 > 0 and index - 1 == blank: return True
if index % 3 < 2 and index + 1 == blank: return True
if index > 2 and index - 3 == blank: return True
if index < 6 and index + 3 == blank: return True
return False
delta = [-1, 1, -3, 3]
def random_move(obj):
global blank, delta
while True:
index = blank + delta[random.randrange(4)]
if movable(index): break
move(obj, index)
def move(obj, index):
global blank, game_board
game_board[index].locate(obj, 300 + 150 * (blank % 3), 460 - 150 * (blank // 3))
game_board[blank].locate(obj, 300 + 150 * (index % 3), 460 - 150 * (index // 3))
game_board[index], game_board[blank] = game_board[blank], game_board[index]
blank = index
def completed():
for index in range(9):
if game_board[index] != init_board[index]: return False
return True
def onMouseAction_piece(object, x, y, action):
global blank, start, max_time
sc = None
index = find_index(object)
if 'loopy' in object._file:
sc = scene1
else:
sc = scene2
if movable(index):
move(sc, index)
if completed():
score = time.time() - start
if max_time > score:
max_time = score
showMessage('The shortest time has been renewed. max score: {:2}, time: {:2} Completed!!!'.format(time.strftime('%H:%M:%S', time.gmtime(score)), time.strftime('%H:%M:%S', time.gmtime(score))))
else:
showMessage('Shortest time failed to break. max score: {:2}, time{:2} Completed!!!'.format(time.strftime('%H:%M:%S', time.gmtime(max_time)), time.strftime('%H:%M:%S', time.gmtime(score))))
Object.onMouseActionDefault = onMouseAction_piece
| 3,310
| 0
| 250
|
40b5fd4c0ce2924a38851bdad6bd0745fb2bd736
| 349
|
py
|
Python
|
akeydo/plugins/__init__.py
|
dangle/vfio-kvm
|
13ed6f6b2ebbc2e23afe267866e321a2fd51a337
|
[
"MIT"
] | 30
|
2021-01-15T18:22:26.000Z
|
2021-06-02T14:10:40.000Z
|
akeydo/plugins/__init__.py
|
dangle/vfio-kvm
|
13ed6f6b2ebbc2e23afe267866e321a2fd51a337
|
[
"MIT"
] | 11
|
2021-01-23T05:37:06.000Z
|
2021-04-21T21:50:37.000Z
|
akeydo/plugins/__init__.py
|
dangle/vfio-kvm
|
13ed6f6b2ebbc2e23afe267866e321a2fd51a337
|
[
"MIT"
] | null | null | null |
import sys
if sys.version_info < (3, 10):
from importlib_metadata import entry_points
else:
from importlib.metadata import entry_points
from . import (
cpu,
devices,
gpu,
memory,
)
__all__ = (
"cpu",
"devices",
"gpu",
"installed_plugins",
"memory",
)
installed_plugins = entry_points(group=__name__)
| 13.96
| 48
| 0.647564
|
import sys
if sys.version_info < (3, 10):
from importlib_metadata import entry_points
else:
from importlib.metadata import entry_points
from . import (
cpu,
devices,
gpu,
memory,
)
__all__ = (
"cpu",
"devices",
"gpu",
"installed_plugins",
"memory",
)
installed_plugins = entry_points(group=__name__)
| 0
| 0
| 0
|
5d36cde055910519c7a52a7522ef39460d4a9945
| 3,888
|
py
|
Python
|
pltools/train/module.py
|
PhoenixDL/PytorchLightningTools
|
86185062d4792e6d5eae002a5594bb7b900106a1
|
[
"MIT"
] | 3
|
2020-05-18T06:34:52.000Z
|
2020-07-17T07:11:57.000Z
|
pltools/train/module.py
|
PhoenixDL/PytorchLightningTools
|
86185062d4792e6d5eae002a5594bb7b900106a1
|
[
"MIT"
] | 6
|
2021-06-25T18:21:06.000Z
|
2021-06-25T18:21:32.000Z
|
pltools/train/module.py
|
PhoenixDL/PytorchLightningTools
|
86185062d4792e6d5eae002a5594bb7b900106a1
|
[
"MIT"
] | 1
|
2020-05-18T06:34:56.000Z
|
2020-05-18T06:34:56.000Z
|
from __future__ import annotations
import typing
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pltools.config import Config
transform_type = typing.Iterable[typing.Callable]
| 31.354839
| 72
| 0.590021
|
from __future__ import annotations
import typing
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pltools.config import Config
transform_type = typing.Iterable[typing.Callable]
class Module(pl.LightningModule):
def __init__(self,
hparams: Config,
model: torch.nn.Module,
train_data: DataLoader = None,
val_data: DataLoader = None,
test_data: DataLoader = None,
**kwargs):
super().__init__(**kwargs)
self.hparams = hparams
self.model = model
self.train_data = train_data
self.val_data = val_data
self.test_data = test_data
self._initial_optimizers = None
self._initial_forward = None
def forward(self, data: torch.Tensor, *args, **kwargs):
return self.model(data, *args, **kwargs)
@pl.data_loader
def train_dataloader(self) -> DataLoader:
if self.train_data is None:
return super().train_dataloader()
return self.train_data
@pl.data_loader
def val_dataloader(self) -> DataLoader:
if self.val_data is None:
return super().val_dataloader()
return self.val_data
@pl.data_loader
def test_dataloader(self) -> DataLoader:
if self.test_data is None:
return super().test_dataloader()
return self.test_data
@property
def val_data(self):
return self._get_internal_dataloader("val")
@val_data.setter
def val_data(self, loader):
self._set_internal_dataloader("val", loader)
@property
def test_data(self):
return self._get_internal_dataloader("test")
@test_data.setter
def test_data(self, loader):
self._set_internal_dataloader("test", loader)
def _get_internal_dataloader(self, name):
return getattr(self, f'_{name}_loader')
def _set_internal_dataloader(self, name, loader):
setattr(self, f'_{name}_loader', loader)
if (loader is not None and
hasattr(self, f'_lazy_{name}_dataloader')):
delattr(self, f'_lazy_{name}_dataloader')
def enable_tta(self,
trafos: transform_type = (),
inverse_trafos: transform_type = None,
tta_reduce: typing.Callable = None,
) -> None:
self._initial_forward = self.forward
self.forward = tta_wrapper(self.forward,
trafos=trafos,
inverse_trafos=inverse_trafos,
tta_reduce=tta_reduce,
)
def disable_tta(self) -> bool:
if self._initial_forward is not None:
self.forward = self._initial_forward
self._initial_forward = None
return True
else:
return False
def tta_wrapper(func: typing.Callable,
trafos: typing.Iterable[typing.Callable] = (),
inverse_trafos: typing.Iterable[typing.Callable] = None,
tta_reduce: typing.Callable = None,
) -> typing.Callable:
_trafo = (None, *trafos)
    _inverse_trafos = (None, *inverse_trafos) if inverse_trafos is not None else None
def tta_forward(data: torch.Tensor, *args,
**kwargs) -> typing.Any:
tta_preds = []
for idx, t in enumerate(_trafo):
tta_data = t(data) if t is not None else data
tta_pred = func(tta_data, *args, **kwargs)
if (_inverse_trafos is not None and
_inverse_trafos[idx] is not None):
tta_pred = _inverse_trafos[idx](tta_pred)
tta_preds.append(tta_pred)
if tta_reduce is not None:
tta_preds = tta_reduce(tta_preds)
return tta_preds
return tta_forward
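# Illustrative use of tta_wrapper (an editor's sketch, not part of the original
# module), assuming a horizontal flip as the test-time augmentation and a simple
# mean over the augmented predictions:
#   flip = lambda x: torch.flip(x, dims=[-1])
#   forward = tta_wrapper(model.forward,
#                         trafos=(flip,),
#                         inverse_trafos=(flip,),
#                         tta_reduce=lambda preds: torch.stack(preds).mean(dim=0))
#   averaged_prediction = forward(batch)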
| 3,126
| 493
| 46
|
5be2c7014a6a8285c9a8486e36cad38accdcfdce
| 959
|
py
|
Python
|
postprocessors.py
|
ahmetb/simplegauges
|
c5a1e809f4534f72c436141f3c506b252ebb6b40
|
[
"Apache-2.0"
] | 2
|
2015-02-14T22:26:36.000Z
|
2015-06-22T12:01:16.000Z
|
postprocessors.py
|
ahmetalpbalkan/simplegauges
|
c5a1e809f4534f72c436141f3c506b252ebb6b40
|
[
"Apache-2.0"
] | null | null | null |
postprocessors.py
|
ahmetalpbalkan/simplegauges
|
c5a1e809f4534f72c436141f3c506b252ebb6b40
|
[
"Apache-2.0"
] | 1
|
2019-04-15T13:45:11.000Z
|
2019-04-15T13:45:11.000Z
|
# coding: utf-8
from datetime import timedelta
from helpers import make_record
def day_fill(data, fill_value=None):
"""Given a data set with missing day values sorted by day, adds records
with value of `fill_value`
"""
return generic_day_fill(1, data, fill_value)
def week_fill(data, fill_value=None):
"""Given a sorted data set with missing week keys, adds records with
value of `fill_value`
"""
return generic_day_fill(7, data, fill_value)
| 28.205882
| 75
| 0.630865
|
# coding: utf-8
from datetime import timedelta
from helpers import make_record
def day_fill(data, fill_value=None):
"""Given a data set with missing day values sorted by day, adds records
with value of `fill_value`
"""
return generic_day_fill(1, data, fill_value)
def week_fill(data, fill_value=None):
"""Given a sorted data set with missing week keys, adds records with
value of `fill_value`
"""
return generic_day_fill(7, data, fill_value)
def generic_day_fill(day_interval, data, fill_value=None):
new_data = list()
prev = None
for dt in data:
if prev:
            diff = (dt['key'] - prev['key']).days // day_interval  # integer division so range() below gets an int
if diff > 1:
for i in range(1, diff):
new_date = prev['key'] + timedelta(days=i*day_interval)
new_data.append(make_record(new_date, fill_value))
new_data.append(dt)
prev = dt
return new_data
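# Illustrative usage (an editor's sketch, not part of the original module). It
# assumes helpers.make_record(date, value) returns a dict with 'key' and 'value'
# entries, which is how generic_day_fill() builds the records it inserts:
#   from datetime import date
#   data = [make_record(date(2021, 1, 1), 5), make_record(date(2021, 1, 4), 2)]
#   filled = day_fill(data, fill_value=0)
#   # records for Jan 2 and Jan 3 are inserted with value 0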
| 456
| 0
| 23
|
ac8a99e9b1eb5584ca24c2aca70dd9be5d5154e8
| 2,373
|
py
|
Python
|
Examples/AppKit/FieldGraph/CGraphModel.py
|
Khan/pyobjc-framework-Cocoa
|
f8b015ea2a72d8d78be6084fb12925c4785b8f1f
|
[
"MIT"
] | 132
|
2015-01-01T10:02:42.000Z
|
2022-03-09T12:51:01.000Z
|
mac/pyobjc-framework-Cocoa/Examples/AppKit/FieldGraph/CGraphModel.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 6
|
2015-01-06T08:23:19.000Z
|
2019-03-14T12:22:06.000Z
|
mac/pyobjc-framework-Cocoa/Examples/AppKit/FieldGraph/CGraphModel.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 27
|
2015-02-23T11:51:43.000Z
|
2022-03-07T02:34:18.000Z
|
from Foundation import NSObject
from objc import *
from AppKit import NSBezierPath
from fieldMath import *
#____________________________________________________________
| 28.939024
| 122
| 0.572693
|
from Foundation import NSObject
from objc import *
from AppKit import NSBezierPath
from fieldMath import *
#____________________________________________________________
class CGraphModel(NSObject):
def init(self):
self.field = [1.0, 1.12, 0.567]
self.phase = [degToRad(0), degToRad(152.6), degToRad(312.9-360)]
self.RMSGain = 0
self.spacing = degToRad(90)
return self
def getGraph(self):
path = NSBezierPath.bezierPath()
maxMag = 0
mag = self.fieldValue(0)
maxMag = max(maxMag, mag)
path.moveToPoint_(polarToRect((mag, 0)))
for deg in range(1, 359, 1):
r = (deg/180.0)*pi
mag = self.fieldValue(r)
maxMag = max(maxMag, mag)
path.lineToPoint_(polarToRect((mag, r)))
path.closePath()
return path, maxMag;
def fieldGain(self):
gain = 0
Et = self.field[0] + self.field[1] + self.field[2]
if Et: # Don't want to divide by zero in the pathological case
spacing = [0, self.spacing, 2*self.spacing]
# This could easily be optimized--but this is just anexample :-)
for i in range(3):
for j in range(3):
gain += self.field[i]*self.field[j] * cos(self.phase[j]-self.phase[i]) * bessel(spacing[j]-spacing[i])
gain = sqrt(gain) / Et
self.RMSGain = gain
return gain
def fieldValue(self, a):
# The intermedate values are used to more closely match standard field equations nomenclature
E0 = self.field[0]
E1 = self.field[1]
E2 = self.field[2]
B0 = self.phase[0]
B1 = self.phase[1] + self.spacing * cos(a)
B2 = self.phase[2] + 2 * self.spacing * cos(a)
phix = sin(B0) * E0 + sin(B1) * E1 + sin(B2) * E2
phiy = cos(B0) * E0 + cos(B1) * E1 + cos(B2) * E2
mag = hypot(phix, phiy)
return mag
def setField(self, tower, field):
self.field[tower] = field
def getField(self, tower):
return self.field[tower]
def setPhase(self, tower, phase):
self.phase[tower] = phase
def getPhase(self, tower):
return self.phase[tower]
def setSpacing(self, spacing):
self.spacing = spacing
def getSpacing(self):
return self.spacing
| 1,902
| 7
| 292
|
e8a306946d3872aaea3b9a534d70503e4797fe01
| 1,549
|
py
|
Python
|
opticspy/ray_tracing/tests/test1_spotdiagram.py
|
benJephunneh/opticspy
|
a0b841f60f7c053b05444c0e8886cd4a99c4d082
|
[
"MIT"
] | null | null | null |
opticspy/ray_tracing/tests/test1_spotdiagram.py
|
benJephunneh/opticspy
|
a0b841f60f7c053b05444c0e8886cd4a99c4d082
|
[
"MIT"
] | null | null | null |
opticspy/ray_tracing/tests/test1_spotdiagram.py
|
benJephunneh/opticspy
|
a0b841f60f7c053b05444c0e8886cd4a99c4d082
|
[
"MIT"
] | null | null | null |
from __future__ import division as division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import field
import traceray
import surface
import cal_tools
# test ray.py and traceray.py
# define rays
l1 = np.linspace(-5,5,10)
Pos1 = []
for i in l1:
for j in l1:
if i**2+j**2<25:
Pos1.append([i,j,0])
KLM = []
for i in Pos1:
KLM.append([0,0,1])
# define surface
surface1 = surface.Surface(number=1,radius = 10000000, thickness = 10, index = 1,STO=0) #object
surface2 = surface.Surface(number=2,radius = 20, thickness = 40, index = 2,STO=0) #surface i
surface3 = surface.Surface(number=3,radius = 10000000, thickness = 0, index = 1,STO=0) #image
raylist1 = []
raylist2 = []
for pos,klm in zip(Pos1,KLM):
ray1 = field.Field(Pos = pos, KLM = klm)
raylist1.append(ray1)
Pos_new_list,KLM_new_list = traceray.trace(raylist1,surface1,surface2)
x = []
y = []
z = []
for i in Pos_new_list:
x.append(i[0])
y.append(i[1])
z.append(i[2])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, z, y)
ax.set_xlim3d(-6, 6)
ax.set_ylim3d(-6, 6)
ax.set_zlim3d(-6, 6)
plt.show()
for pos,klm in zip(Pos_new_list,KLM_new_list):
ray2 = field.Field(Pos = pos, KLM = klm)
raylist2.append(ray2)
Pos_new_list1,KLM_new_list1 = traceray.trace(raylist2, surface2, surface3)
x2 = []
y2 = []
z2 = []
for i in Pos_new_list1:
x2.append(i[0])
y2.append(i[1])
z2.append(i[2])
fig = plt.figure()
plt.plot(x2,y2,'b*')
plt.show()
rms = cal_tools.rms(Pos_new_list1)
print rms
| 19.123457
| 95
| 0.684312
|
from __future__ import division as division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import field
import traceray
import surface
import cal_tools
# test ray.py and traceray.py
# define rays
l1 = np.linspace(-5,5,10)
Pos1 = []
for i in l1:
for j in l1:
if i**2+j**2<25:
Pos1.append([i,j,0])
KLM = []
for i in Pos1:
KLM.append([0,0,1])
# define surface
surface1 = surface.Surface(number=1,radius = 10000000, thickness = 10, index = 1,STO=0) #object
surface2 = surface.Surface(number=2,radius = 20, thickness = 40, index = 2,STO=0) #surface i
surface3 = surface.Surface(number=3,radius = 10000000, thickness = 0, index = 1,STO=0) #image
raylist1 = []
raylist2 = []
for pos,klm in zip(Pos1,KLM):
ray1 = field.Field(Pos = pos, KLM = klm)
raylist1.append(ray1)
Pos_new_list,KLM_new_list = traceray.trace(raylist1,surface1,surface2)
x = []
y = []
z = []
for i in Pos_new_list:
x.append(i[0])
y.append(i[1])
z.append(i[2])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, z, y)
ax.set_xlim3d(-6, 6)
ax.set_ylim3d(-6, 6)
ax.set_zlim3d(-6, 6)
plt.show()
for pos,klm in zip(Pos_new_list,KLM_new_list):
ray2 = field.Field(Pos = pos, KLM = klm)
raylist2.append(ray2)
Pos_new_list1,KLM_new_list1 = traceray.trace(raylist2, surface2, surface3)
x2 = []
y2 = []
z2 = []
for i in Pos_new_list1:
x2.append(i[0])
y2.append(i[1])
z2.append(i[2])
fig = plt.figure()
plt.plot(x2,y2,'b*')
plt.show()
rms = cal_tools.rms(Pos_new_list1)
print rms
| 0
| 0
| 0
|
0584d6f43da520c5c77daa1e83965714f77af218
| 2,050
|
py
|
Python
|
core/test/mime/test_mime_codec_register.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | 3
|
2021-06-20T02:24:10.000Z
|
2022-01-26T23:55:33.000Z
|
core/test/mime/test_mime_codec_register.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | null | null | null |
core/test/mime/test_mime_codec_register.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from unittest import TestCase, main
from recc.mime.mime_codec_register import get_global_mime_register
if __name__ == "__main__":
main()
| 36.607143
| 71
| 0.692683
|
# -*- coding: utf-8 -*-
from unittest import TestCase, main
from recc.mime.mime_codec_register import get_global_mime_register
class _ComplexObject:
def __init__(self):
self.test_data1 = "text"
self.test_data2 = 100
self.test_data3 = 3.14
class MimeCodecRegisterTestCase(TestCase):
def test_binary(self):
test_data = {"aa": 11, "bb": 22.5, "cc": [1, 2, 3]}
codec = get_global_mime_register()
encoded_data = codec.encode_binary(test_data)
self.assertIsInstance(encoded_data, bytes)
decoded_data = codec.decode_binary(encoded_data)
self.assertIsInstance(decoded_data, dict)
self.assertEqual(decoded_data, test_data)
def test_binary_complex(self):
test_data = _ComplexObject()
codec = get_global_mime_register()
encoded_data = codec.encode_binary(test_data)
self.assertIsInstance(encoded_data, bytes)
decoded_data = codec.decode_binary(encoded_data)
self.assertIsInstance(decoded_data, _ComplexObject)
self.assertEqual(decoded_data.test_data1, test_data.test_data1)
self.assertEqual(decoded_data.test_data2, test_data.test_data2)
self.assertEqual(decoded_data.test_data3, test_data.test_data3)
def test_json(self):
test_data = {"aa": 11, "bb": 22.5, "cc": [1, 2, 3]}
codec = get_global_mime_register()
encoded_data = codec.encode_json(test_data)
self.assertIsInstance(encoded_data, bytes)
decoded_data = codec.decode_json(encoded_data)
self.assertIsInstance(decoded_data, dict)
self.assertEqual(decoded_data, test_data)
def test_text(self):
test_data = "Hello, World!"
codec = get_global_mime_register()
encoded_data = codec.encode_text(test_data)
self.assertIsInstance(encoded_data, bytes)
decoded_data = codec.decode_text(encoded_data)
self.assertIsInstance(decoded_data, str)
self.assertEqual(decoded_data, test_data)
if __name__ == "__main__":
main()
| 1,680
| 21
| 179
|
59330cfcf5414e3f86a1a7f10c339aa1302b5819
| 4,186
|
py
|
Python
|
concise_fanyi.py
|
Yo-gurts/dict
|
86e662ba9b7599473332c61de05635e8dce24f83
|
[
"MIT"
] | null | null | null |
concise_fanyi.py
|
Yo-gurts/dict
|
86e662ba9b7599473332c61de05635e8dce24f83
|
[
"MIT"
] | null | null | null |
concise_fanyi.py
|
Yo-gurts/dict
|
86e662ba9b7599473332c61de05635e8dce24f83
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.7
import sys
import json
import signal
import urllib.request as urllib
import threading
import pyperclip
import time
if __name__ == '__main__':
exitflag = False
try:
signal.signal(signal.SIGINT, quit)
signal.signal(signal.SIGTERM, quit)
thread1 = Clipboard()
thread2 = Outinput()
thread1.setDaemon(True)
thread1.start()
thread2.setDaemon(True)
thread2.start()
thread1.join()
thread2.join()
print("bye!!")
except:
print()
| 29.272727
| 115
| 0.4742
|
#!/usr/bin/env python3.7
import sys
import json
import signal
import urllib.request as urllib
import threading
import pyperclip
import time
class Dict:
key = '716426270'
keyFrom = 'wufeifei'
api = 'http://fanyi.youdao.com/openapi.do?keyfrom=wufeifei&key=716426270&type=data&doctype=json&version=1.1&q='
content = None
def __init__(self, argv):
try:
self.api = self.api + urllib.quote(argv)
self.translate()
except:
print("Input invalid!!")
def translate(self):
content = urllib.urlopen(self.api).read()
self.content = json.loads(content)
self.parse()
def parse(self):
code = self.content['errorCode']
if code == 0: # Success
try:
u = self.content['basic']['us-phonetic'] # English
e = self.content['basic']['uk-phonetic']
except KeyError:
try:
c = self.content['basic']['phonetic'] # Chinese
except KeyError:
c = 'None'
u = 'None'
e = 'None'
try:
explains = self.content['basic']['explains']
except KeyError:
explains = 'None'
print('\033[1;31m################################### \033[0m')
# flag
#print('\033[1;31m# \033[0m', self.content['query'], self.content['translation'][0], end="")
print('\033[1;31m# \033[0m', self.content['query'], self.content['translation'][0])
if u != 'None':
print('(U:', u, 'E:', e, ')')
elif c != 'None':
print('(Pinyin:', c, ')')
else:
print()
if explains != 'None':
for i in range(0, len(explains)):
print('\033[1;31m# \033[0m', explains[i])
else:
print('\033[1;31m# \033[0m Explains None')
print('\033[1;31m################################### \033[0m')
# Phrase
# for i in range(0, len(self.content['web'])):
# print self.content['web'][i]['key'], ':'
# for j in range(0, len(self.content['web'][i]['value'])):
# print self.content['web'][i]['value'][j]
elif code == 20: # Text to long
print('WORD TO LONG')
elif code == 30: # Trans error
print('TRANSLATE ERROR')
elif code == 40: # Don't support this language
print('CAN\'T SUPPORT THIS LANGUAGE')
elif code == 50: # Key failed
print('KEY FAILED')
elif code == 60: # Don't have this word
print('DO\'T HAVE THIS WORD')
class Clipboard (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.raw = "Welcome!!"
def run(self):
global exitflag
while not exitflag:
time.sleep(0.5)
new_raw = pyperclip.paste()
if new_raw != self.raw:
self.raw = new_raw
words = self.raw.split(",")
print()
for word in words:
Dict(word)
                # why doesn't this get displayed here????
print(">>>", end="", flush=True)
class Outinput (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global exitflag
while not exitflag:
raw = input(">>>")
words = raw.split(",")
if words == ['exit']:
exitflag = True
else:
for word in words:
Dict(word)
def quit(signum=None, frame=None):  # signal handlers are invoked with (signum, frame)
print("bye!!")
sys.exit()
if __name__ == '__main__':
exitflag = False
try:
signal.signal(signal.SIGINT, quit)
signal.signal(signal.SIGTERM, quit)
thread1 = Clipboard()
thread2 = Outinput()
thread1.setDaemon(True)
thread1.start()
thread2.setDaemon(True)
thread2.start()
thread1.join()
thread2.join()
print("bye!!")
except:
print()
| 3,147
| 280
| 198
|
ce0cef8e0251b3e6085357fac68c61249be16c60
| 2,512
|
py
|
Python
|
wikipedia_fetch.py
|
nik7273/computational-medical-knowledge
|
03357fc63382bed49509d7860f87a3d010f03018
|
[
"Apache-2.0"
] | null | null | null |
wikipedia_fetch.py
|
nik7273/computational-medical-knowledge
|
03357fc63382bed49509d7860f87a3d010f03018
|
[
"Apache-2.0"
] | null | null | null |
wikipedia_fetch.py
|
nik7273/computational-medical-knowledge
|
03357fc63382bed49509d7860f87a3d010f03018
|
[
"Apache-2.0"
] | 1
|
2019-09-17T18:38:44.000Z
|
2019-09-17T18:38:44.000Z
|
# -*- coding: utf-8 -*-
#Search Wikipedia for Heart Attack
import wikipedia, codecs, itertools, os, time
from pprint import pprint
relevant_categories = {'medical','emergencies','disease'}
conditions = ["heart attack","palpitations"] #Search all related pages?
make_filename = lambda aStr: aStr.replace(' ','_')
for condition in conditions:
findRelevantArticles(condition,data_path=os.path.join('./data/wikipedia',make_filename(condition)))
| 55.822222
| 143
| 0.615844
|
# -*- coding: utf-8 -*-
#Search Wikipedia for Heart Attack
import wikipedia, codecs, itertools, os, time
from pprint import pprint
relevant_categories = {'medical','emergencies','disease'}
def findRelevantArticles(term,data_path='.'):
articleList = []
articles = wikipedia.search(term) #Setting suggestion = False (default value); No clear use for it now
for article in articles:
try:
article = wikipedia.page(article)
category_keywords = set(list(itertools.chain.from_iterable([category.lower().split() for category in article.categories])))
if len(category_keywords & relevant_categories) > 0:
articlefilename = "content_"+str(article.title.lower())+".txt"
if os.path.isfile(articlefilename):
articlefilename = "content_"+ str(article.title.lower())+'%s.txt' % str(term+time.strftime("%Y%m%d-%H%M%S"))
with codecs.open(os.path.join(data_path,articlefilename),'wb', 'utf-8') as outfile:
content = wikipedia.page(article).content
print>>outfile,content
articleList.append(str(article.title))
except wikipedia.exceptions.PageError as e:
pass
except wikipedia.exceptions.DisambiguationError as e:
for article in e.options:
try:
article = wikipedia.page(article)
category_keywords = set(list(itertools.chain.from_iterable([category.lower().split() for category in article.categories])))
if len(category_keywords & relevant_categories) > 0:
articlefilename = "content_"+str(article.title.lower())+".txt"
if os.path.isfile(articlefilename):
articlefilename = "content_"+ str(article.title.lower())+'%s.txt' % str(term+time.strftime("%Y%m%d-%H%M%S"))
with codecs.open(os.path.join(data_path,articlefilename),'wb','utf-8') as outfile:
print>>outfile,article.content
articleList.append(str(article.title))
except wikipedia.exceptions.DisambiguationError as f:
pass
conditions = ["heart attack","palpitations"] #Search all related pages?
make_filename = lambda aStr: aStr.replace(' ','_')
for condition in conditions:
findRelevantArticles(condition,data_path=os.path.join('./data/wikipedia',make_filename(condition)))
| 2,033
| 0
| 23
|
dd59d853dc65578a9c1c4b63d4f2b0e492d54f61
| 14,741
|
py
|
Python
|
accounts/views.py
|
witty-technologies-empowerment/codeupblood
|
a0aa1725e5776d80e083b6d4e9e67476bb97e983
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
witty-technologies-empowerment/codeupblood
|
a0aa1725e5776d80e083b6d4e9e67476bb97e983
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
witty-technologies-empowerment/codeupblood
|
a0aa1725e5776d80e083b6d4e9e67476bb97e983
|
[
"MIT"
] | 1
|
2022-01-19T11:09:13.000Z
|
2022-01-19T11:09:13.000Z
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from datetime import datetime, timedelta
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template import Context
from django.template.loader import render_to_string
import random
import string
import requests
import json
from donor.models import DonorDetail as DD, NewDonor as ND
from recipient.models import RecipientDetail as RD
from .models import AccountPath as AP
# Create your views here.
@login_required(login_url='/accounts')
#================= FUCTIONS =================#
| 35.86618
| 114
| 0.521606
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from datetime import datetime, timedelta
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template import Context
from django.template.loader import render_to_string
import random
import string
import requests
import json
from donor.models import DonorDetail as DD, NewDonor as ND
from recipient.models import RecipientDetail as RD
from .models import AccountPath as AP
# Create your views here.
def AccountHome(request):
user_login = request.user
if request.user.is_authenticated:
check_path = AP.objects.filter(username=user_login)
for x in check_path:
path = x.path
if path == 'admin':
pass
elif path == 'donor':
return HttpResponseRedirect(reverse('donor:home'))
else:
# return HttpResponseRedirect(reverse('reci:home'))
pass
else:
if request.method == 'POST':
path = request.POST.get('path')
if path == 'donor':
return HttpResponseRedirect(reverse('donor:home'))
else:
# return HttpResponseRedirect(reverse('reci:home'))
pass
return render(request, 'accounts/home.html',)
def donor_login(request, Nuser=None):
user_login = request.user
if request.user.is_authenticated:
check_path = AP.objects.filter(username=user_login)
for x in check_path:
path = x.path
if path == 'admin':
pass
elif path == 'donor':
return HttpResponseRedirect(reverse('donor:home'))
else:
# return HttpResponseRedirect(reverse('reci:home'))
pass
else:
next_url = None
next_ = False
display = False
error = ''
if 'next' in str(request):
next_url = request.GET.get('next')
next_ = True
if request.method == 'POST':
# user_name = request.POST.get('emailID')
email = request.POST.get('username')
email = str(email).strip().lower()
pass_word = request.POST.get('password')
password = str(pass_word).strip().lower()
if '@' in email:
get_email = User.objects.filter(email=email)
if get_email.exists():
for x in get_email:
username = x.username
else:
error = 'Invalid Login email'
display = True
context = {
'error':error,
'display':display,
}
return render(request, 'accounts/donor_login.html', context)
else:
get_username = User.objects.filter(username=email)
if get_username.exists():
username = email
else:
error = 'Invalid Login User'
display = True
context = {
'error':error,
'display':display,
}
return render(request, 'accounts/donor_login.html', context)
get_active = User.objects.filter(username=username.lower(), is_active=True)
if get_active.exists():
user = authenticate(username=username.lower(), password=password)
else:
get_user = User.objects.filter(username=username.lower())
if get_user.exists():
error = 'Invalid Login Details'
display = True
context = {
'error':error,
'display':display,
}
return render(request, 'accounts/donor_login.html', context)
else:
error = 'Account Deactivated'
display = True
xdisplay = True
context = {
'error':error,
'display':display,
'xdisplay':xdisplay,
}
return render(request, 'accounts/donor_login.html', context)
if user:
if user.is_active:
login(request, user)
# data = RA()
# data.user = username
# data.activity_type = 'login'
# now = datetime.now()
# date = now.strftime('%Y-%m-%dT%TZ')
# data.time = date
# data.status = 'You logged in near ' + location
# data.save()
# send_sms.send(sender=None, smstype='login-'+str(location), user = request.user)
if 'admin' in username:
url = str('/_')
return HttpResponseRedirect(url)
else:
if next_:
if('signout' not in next_url):
return HttpResponseRedirect(next_url)
else:
return HttpResponseRedirect(reverse('donor:home'))
else:
return HttpResponseRedirect(reverse('donor:home'))
else:
error = 'Invalid Login'
display = True
context = {
'error':error,
'display':display,
}
return render(request, 'accounts/donor_login.html', context)
else:
error = 'Invalid Login'
display = True
context = {
'error':error,
'display':display,
}
return render(request, 'accounts/donor_login.html', context)
else:
if Nuser != None:
context = {
'Nuser':Nuser,
'error':error,
'display':display,
}
else:
context = {
'error':error,
'display':display,
}
return render(request, 'accounts/donor_login.html', context)
return render(request, 'accounts/donor_login.html')
def donor_reg(request):
xcheck = False
user_login = request.user
full_name = ''
username = ''
email = ''
user_id = ran_gen(6,'ABCDEFGHIJKLMPQRSTUVWXYZ123456789')
    while not xcheck:  # keep generating IDs until an unused one is found
check_id = AP.objects.filter(username=user_id)
if check_id.exists():
user_id = ran_gen(6,'ABCDEFGHIJKLMPQRSTUVWXYZ123456789')
xcheck = False
else:
xcheck = True
display = False
xList = []
if request.user.is_authenticated:
check_path = AP.objects.filter(username=user_login)
for x in check_path:
path = x.path
if path == 'admin':
pass
elif path == 'donor':
return HttpResponseRedirect(reverse('donor:home'))
else:
# return HttpResponseRedirect(reverse('reci:home'))
pass
else:
if request.method == 'POST':
f_name = request.POST.get('name')
full_name = f_name.lower().lstrip().rstrip()
user_name = request.POST.get('username')
username = user_name.lower().lstrip().rstrip()
# gen_der = request.POST.get('gender')
# gender = gen_der.lower().lstrip().rstrip()
# blood_type = request.POST.get('bloodtype')
# bloodtype = blood_type.lower().lstrip().rstrip()
e_mail = request.POST.get('email')
email = e_mail.lower().lstrip().rstrip()
# tele_phone = request.POST.get('telephone')
# telephone = tele_phone.lower().lstrip().rstrip()
# sta_te = request.POST.get('state')
# state = sta_te.lower().lstrip().rstrip()
pass_ = request.POST.get('pass')
password = pass_.lower().lstrip().rstrip()
pass_2 = request.POST.get('pass2')
password2 = pass_2.lower().lstrip().rstrip()
checkemail = User.objects.filter(email=email)
checkuser = User.objects.filter(username=username)
if password != password2:
error = {'error':"Both passwords didn't match"}
display = True
xList.append(error)
if len(password) < 6 :
error = {'error':"Password should be at least 6 charaters long"}
display = True
xList.append(error)
if checkuser.exists():
error = {'error':str(username) + " is not available"}
display = True
xList.append(error)
if checkemail.exists():
error = {'error':str(email) + " has been used"}
display = True
xList.append(error)
email = ''
if ' ' not in full_name:
error = {'error':"Please provide full name with a space"}
display = True
xList.append(error)
if not display:
random_number = random.randint(0,16777215)
hex_number = format(random_number,'x')
hex_number = '#'+hex_number
# RefCode = ran_gen(8,'ABCDEFGHIJKLMPQRSTUVWXYZ123456789')
name = full_name.split(' ')
namex = full_name.replace(' ', '-')
detail = User()
detail.first_name = name[0]
detail.last_name = name[1]
detail.username = username
detail.email = email
detail.set_password(password)
                detail.is_active = True
                detail.is_staff = False
                detail.is_superuser = False
detail.save()
a = AP()
a.username = username
a.color_code = hex_number
a.path = 'donor'
a.save()
a = DD()
a.full_name = full_name
a.username = username
a.gender = ''
a.bloodtype = ''
a.email = email
a.telephone = ''
a.state = ''
a.password = password
a.save()
a = ND()
a.user = username
a.expires = datetime.now() + timedelta(hours=24)
a.save()
user = authenticate(username=username, password=password)
if user:
if user.is_active:
try:
URL = 'https://safewayfx.com/api/v1/codeupblood/newUser/'+namex+'/'+email+'/'+username
print(URL)
ress = json.loads(requests.get(URL).json())
print(str(ress))
except Exception as e:
print('>>>'+str(e))
login(request, user)
return HttpResponseRedirect(reverse('accounts:donor_add'))
data = {
'user_id':'CB-'+user_id,
'display':display,
'xLists':xList,
'full_name':full_name,
'username':username,
'email':email,
}
return render(request, 'accounts/donor_reg.html', data)
def donor_recover(request):
return render(request, 'accounts/donor_pwd.html')
def rec_login(request):
return render(request, 'accounts/rec_login.html')
def rec_reg(request):
user_id = ran_gen(6,'ABCDEFGHIJKLMPQRSTUVWXYZ123456789')
data = {
'user_id':user_id,
}
return render(request, 'accounts/rec_reg.html', data)
def rec_recover(request):
return render(request, 'accounts/rec_pwd.html')
def donor_add(request):
user_login = request.user
display = False
# if request.user.is_authenticated:
# check_path = AP.objects.filter(username=user_login)
# check_ = DD.objects.filter(username=user_login)
# for x in check_path:
# path = x.path
# if path == 'admin':
# return HttpResponseRedirect(reverse('admin'))
# elif path == 'donor':
# if check_.exists():
# return HttpResponseRedirect(reverse('donor:home'))
# else:
# # return HttpResponseRedirect(reverse('reci:home'))
# pass
# else:
if request.method == 'POST':
te_le = request.POST.get('tele')
tele = te_le.lower().lstrip().rstrip()
addre_ss = request.POST.get('address')
address = addre_ss.lower().lstrip().rstrip()
locali_ty = request.POST.get('locality')
locality = locali_ty.lower().lstrip().rstrip()
sta_te = request.POST.get('state')
state = sta_te.lower().lstrip().rstrip()
count_ry = request.POST.get('country')
country = count_ry.lower().lstrip().rstrip()
gend_er = request.POST.get('gender')
gender = gend_er.lower().lstrip().rstrip()
bloodty_pe = request.POST.get('bloodtype')
bloodtype = bloodty_pe.lower().lstrip().rstrip()
xList = []
checkPhone = DD.objects.filter(telephone=tele)
if checkPhone.exists():
error = {'error':"Telephone already exists"}
display = True
xList.append(error)
if not display:
checkPhone = DD.objects.filter(username=user_login)
for x in checkPhone:
a = DD()
a.pk = x.pk
a.full_name = x.full_name
a.username = x.username
a.gender = gender
a.bloodtype = bloodtype
a.email = x.email
a.telephone = tele
a.address = address
a.city = locality
a.state = state
a.country = country
a.password = x.password
a.created = x.created
a.save()
return HttpResponseRedirect(reverse('donor:home'))
return render(request, 'accounts/donor_add.html')
@login_required(login_url='/accounts')
def any_user_signout(request):
auth = request.user
message = 'You have successfully signed out'
logout(request)
context = {
'message':message,
'auth':auth,
}
return HttpResponseRedirect(reverse('accounts:home'))
#================= FUCTIONS =================#
def ran_gen(size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
| 13,692
| 0
| 228
|
6bdd6d4d7f38f692c96a75ae5e82669cba9cf73b
| 1,036
|
py
|
Python
|
programs_tutorial_2/direct_disks_multirun.py
|
golu-golu/statistcal-mechanics
|
8ff66280ee5a6a816e6ca70934e92001e624dfad
|
[
"MIT"
] | null | null | null |
programs_tutorial_2/direct_disks_multirun.py
|
golu-golu/statistcal-mechanics
|
8ff66280ee5a6a816e6ca70934e92001e624dfad
|
[
"MIT"
] | null | null | null |
programs_tutorial_2/direct_disks_multirun.py
|
golu-golu/statistcal-mechanics
|
8ff66280ee5a6a816e6ca70934e92001e624dfad
|
[
"MIT"
] | null | null | null |
import random, math
N = 16
eta = 0.26
sigma = math.sqrt(eta / N / math.pi)
n_runs = 100
print 'Note that this program might take a while!'
for run in range(n_runs):
iterations, config = direct_disks(N, sigma)
print 'run',run
print iterations - 1, 'tabula rasa wipe-outs before producing the following configuration'
print config
print
| 26.564103
| 94
| 0.542471
|
import random, math
def dist(x,y):
d_x = abs(x[0] - y[0]) % 1.0
d_x = min(d_x, 1.0 - d_x)
d_y = abs(x[1] - y[1]) % 1.0
d_y = min(d_y, 1.0 - d_y)
return math.sqrt(d_x**2 + d_y**2)
def direct_disks(N, sigma):
n_iter = 0
condition = False
while condition == False:
n_iter += 1
L = [(random.random(), random.random())]
for k in range(1, N):
a = (random.random(), random.random())
min_dist = min(dist(a, b) for b in L)
if min_dist < 2.0 * sigma:
condition = False
break
else:
L.append(a)
condition = True
return n_iter, L
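# Note added for clarity (not in the original file): direct_disks() is
# "tabula rasa" rejection sampling -- it restarts from an empty configuration
# until all N disks are placed with no pair closer than 2*sigma, and dist()
# measures separations with periodic (toroidal) boundary conditions.
# The sigma chosen below follows from the covering density eta: N disks of
# radius sigma on the unit square cover a fraction eta = N * pi * sigma**2,
# hence sigma = sqrt(eta / (N * pi)).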
N = 16
eta = 0.26
sigma = math.sqrt(eta / N / math.pi)
n_runs = 100
print 'Note that this program might take a while!'
for run in range(n_runs):
iterations, config = direct_disks(N, sigma)
print 'run',run
print iterations - 1, 'tabula rasa wipe-outs before producing the following configuration'
print config
print
| 628
| 0
| 50
|
9ae56ae95d290db134f2e153096fa8dd43af143f
| 190
|
py
|
Python
|
Python/Topics/Regexp functions in Python/Matching username requirements/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/Topics/Regexp functions in Python/Matching username requirements/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/Topics/Regexp functions in Python/Matching username requirements/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
import re
template = r"[a-zA-Z]"
username = input()
match = re.match(template, username)
if match:
print("Thank you!")
else:
print("Oops! The username has to start with a letter.")
| 19
| 59
| 0.668421
|
import re
template = r"[a-zA-Z]"
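# Note (added comment): re.match() only attempts a match at the start of the
# string, so this single character class is enough to verify that the
# username begins with a letter.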
username = input()
match = re.match(template, username)
if match:
print("Thank you!")
else:
print("Oops! The username has to start with a letter.")
| 0
| 0
| 0
|
ac03251cb2681eb1fe70b4d5e22c86a343b0173e
| 1,428
|
py
|
Python
|
tools/get_comm.py
|
hfingler/ava
|
8ade884d82dc1465a24fd1ab682a54afe1765f6e
|
[
"BSD-2-Clause"
] | null | null | null |
tools/get_comm.py
|
hfingler/ava
|
8ade884d82dc1465a24fd1ab682a54afe1765f6e
|
[
"BSD-2-Clause"
] | null | null | null |
tools/get_comm.py
|
hfingler/ava
|
8ade884d82dc1465a24fd1ab682a54afe1765f6e
|
[
"BSD-2-Clause"
] | 1
|
2021-06-17T16:13:27.000Z
|
2021-06-17T16:13:27.000Z
|
import argparse
import numpy as np
if __name__ == '__main__':
main()
| 29.75
| 85
| 0.553221
|
import argparse
import numpy as np
def load_stats(file_name, stats):
with open(file_name, 'r') as fin:
for line in fin:
sp = line.strip().split(",")
time = sp[1].strip()
namesp = sp[0].split(" ")
name = namesp[1]
if name not in stats:
stats[name] = [0, []]
stats[name][0] += 1
stats[name][1].append(int(time))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--gstats", required=True, type=str, help="guest stats file")
parser.add_argument("--wstats", required=True, help="worker stats file")
args = parser.parse_args()
guest_stats = {}
worker_stats = {}
load_stats(args.gstats, guest_stats)
load_stats(args.wstats, worker_stats)
keys = sorted(guest_stats.keys())
for n in keys:
if n[-6:] == "_async":
name = n[:-6]
else:
name = n
if name in worker_stats:
g_exec_time = np.array(guest_stats[n][1])
w_exec_time = np.array(worker_stats[name][1])
g_exec_time = g_exec_time / 1000000.0
w_exec_time = w_exec_time / 1000000.0
g_total = np.sum(g_exec_time)
w_total = np.sum(w_exec_time)
print(str(n), round(g_total, 3), round(w_total, 3),
round(g_total - w_total, 3))
if __name__ == '__main__':
main()
| 1,305
| 0
| 46
|
6f33ee5e698e01170b6449db0fd472335c766d53
| 814
|
py
|
Python
|
src/livecoding/pythonreloader.py
|
ashwoods/python-qt-live-coding
|
b87e6fed021c5a9af72dee3b7b32f9c799816b8e
|
[
"MIT"
] | 37
|
2018-07-08T04:53:12.000Z
|
2022-03-17T07:33:21.000Z
|
src/livecoding/pythonreloader.py
|
ashwoods/python-qt-live-coding
|
b87e6fed021c5a9af72dee3b7b32f9c799816b8e
|
[
"MIT"
] | 2
|
2020-01-07T22:03:29.000Z
|
2020-09-28T12:15:57.000Z
|
src/livecoding/pythonreloader.py
|
ashwoods/python-qt-live-coding
|
b87e6fed021c5a9af72dee3b7b32f9c799816b8e
|
[
"MIT"
] | 6
|
2020-02-12T18:55:13.000Z
|
2021-12-31T03:54:40.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
import signal
import inspect
from qtpy.QtCore import QObject, Slot
| 30.148148
| 84
| 0.638821
|
# -*- coding: utf-8 -*-
import os
import sys
import signal
import inspect
from qtpy.QtCore import QObject, Slot
class PythonReloader(QObject):
def __init__(self, main, parent=None):
super(PythonReloader, self).__init__(parent)
self._main = main
@Slot()
def restart(self):
import_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
python_path = os.environ.get('PYTHONPATH', '')
if import_dir not in python_path:
python_path += ':{}'.format(import_dir)
os.environ['PYTHONPATH'] = python_path
args = [sys.executable, self._main] + sys.argv[1:]
handler = signal.getsignal(signal.SIGTERM)
if handler:
handler(signal.SIGTERM, inspect.currentframe())
os.execv(sys.executable, args)
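        # Editor's note (not in the original file): os.execv() replaces the
        # current process image in place, so the application restarts itself
        # with the same interpreter and command-line arguments; any SIGTERM
        # handler registered by the app is invoked first so it can clean up.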
| 603
| 74
| 23
|
28dbf580557251c65f5af58073dd31f05369dcdd
| 482
|
py
|
Python
|
sla_cli/src/db/accessors/abbreviations.py
|
DavidWalshe93/SL-CLI
|
c92ca8a6e57eb51bf9c9433013ce16d443f8d152
|
[
"MIT"
] | 2
|
2022-01-07T09:59:32.000Z
|
2022-01-25T12:04:06.000Z
|
sla_cli/src/db/accessors/abbreviations.py
|
DavidWalshe93/SL-CLI
|
c92ca8a6e57eb51bf9c9433013ce16d443f8d152
|
[
"MIT"
] | null | null | null |
sla_cli/src/db/accessors/abbreviations.py
|
DavidWalshe93/SL-CLI
|
c92ca8a6e57eb51bf9c9433013ce16d443f8d152
|
[
"MIT"
] | 1
|
2021-04-07T17:14:37.000Z
|
2021-04-07T17:14:37.000Z
|
"""
Author: David Walshe
Date: 08 April 2021
"""
import logging
from tabulate import tabulate
from sla_cli.src.db.accessors.base import Accessor
logger = logging.getLogger(__name__)
| 24.1
| 143
| 0.69917
|
"""
Author: David Walshe
Date: 08 April 2021
"""
import logging
from tabulate import tabulate
from sla_cli.src.db.accessors.base import Accessor
logger = logging.getLogger(__name__)
class Abbreviations(Accessor):
def abbreviations(self, tablefmt: str = "simple") -> str:
"""Returns the abbreviation table."""
return tabulate([(abbrev, dataset) for dataset, abbrev in self.db.abbrev.items()], headers=["Abbrev.", "Diagnosis"], tablefmt=tablefmt)
| 0
| 262
| 23
|
2ef452770876cc4be160a58e302e2efdc1a66543
| 20,528
|
py
|
Python
|
tia/analysis/model/ret.py
|
lsternlicht/tia
|
fe74d1876260a946e52bd733bc32da0698749f2c
|
[
"BSD-3-Clause"
] | null | null | null |
tia/analysis/model/ret.py
|
lsternlicht/tia
|
fe74d1876260a946e52bd733bc32da0698749f2c
|
[
"BSD-3-Clause"
] | null | null | null |
tia/analysis/model/ret.py
|
lsternlicht/tia
|
fe74d1876260a946e52bd733bc32da0698749f2c
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.util.decorator import lazy_property
from tia.analysis.model.interface import TxnPlColumns as TPL
from tia.analysis.perf import drawdown_info, drawdowns, guess_freq, downside_deviation, periodicity
from tia.analysis.plots import plot_return_on_dollar
from tia.util.mplot import AxesFormat
from tia.util.fmt import PercentFormatter, new_percent_formatter, new_float_formatter
__all__ = ['RoiiRetCalculator', 'AumRetCalculator', 'FixedAumRetCalculator', 'CumulativeRets', 'Performance']
def return_on_initial_capital(capital, period_pl, leverage=None):
"""Return the daily return series based on the capital"""
if capital <= 0:
raise ValueError('cost must be a positive number not %s' % capital)
leverage = leverage or 1.
eod = capital + (leverage * period_pl.cumsum())
ltd_rets = (eod / capital) - 1.
dly_rets = ltd_rets
dly_rets.iloc[1:] = (1. + ltd_rets).pct_change().iloc[1:]
return dly_rets
| 42.589212
| 120
| 0.610581
|
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.util.decorator import lazy_property
from tia.analysis.model.interface import TxnPlColumns as TPL
from tia.analysis.perf import drawdown_info, drawdowns, guess_freq, downside_deviation, periodicity
from tia.analysis.plots import plot_return_on_dollar
from tia.util.mplot import AxesFormat
from tia.util.fmt import PercentFormatter, new_percent_formatter, new_float_formatter
__all__ = ['RoiiRetCalculator', 'AumRetCalculator', 'FixedAumRetCalculator', 'CumulativeRets', 'Performance']
def return_on_initial_capital(capital, period_pl, leverage=None):
"""Return the daily return series based on the capital"""
if capital <= 0:
raise ValueError('cost must be a positive number not %s' % capital)
leverage = leverage or 1.
eod = capital + (leverage * period_pl.cumsum())
ltd_rets = (eod / capital) - 1.
dly_rets = ltd_rets
dly_rets.iloc[1:] = (1. + ltd_rets).pct_change().iloc[1:]
return dly_rets
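# Worked example (editor's illustration, not part of the original source):
# with capital = 100, leverage = 1 and period P&L of [1, -2, 3],
#   eod      = 100 + cumsum([1, -2, 3])   -> [101, 99, 102]
#   ltd_rets = eod / 100 - 1              -> [0.01, -0.01, 0.02]
# The first period keeps its LTD return; later periods become period-over-period
# returns via (1 + ltd_rets).pct_change():
#   dly_rets -> [0.01, 99/101 - 1, 102/99 - 1]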
class RetCalculator(object):
def compute(self, txns):
raise NotImplementedError()
class RoiiRetCalculator(RetCalculator):
def __init__(self, leverage=None):
"""
:param leverage: {None, scalar, Series}, number to scale the position returns
:return:
"""
get_lev = None
if leverage is None:
pass
elif np.isscalar(leverage):
if leverage <= 0:
raise ValueError('leverage must be a positive non-zero number, not %s' % leverage)
else:
get_lev = lambda ts: leverage
elif isinstance(leverage, pd.Series):
get_lev = lambda ts: leverage.asof(ts)
else:
raise ValueError(
'leverage must be {None, positive scalar, Datetime/Period indexed Series} not %s' % type(leverage))
self.leverage = leverage
self.get_lev = get_lev
def compute(self, txns):
txnpl = txns.pl.txn_frame
txnrets = pd.Series(0, index=txnpl.index, name='ret')
get_lev = self.get_lev
for pid, pframe in txnpl[[TPL.OPEN_VAL, TPL.PID, TPL.PL, TPL.DT]].groupby(TPL.PID):
if pid != 0:
cost = abs(pframe[TPL.OPEN_VAL].iloc[0])
ppl = pframe[TPL.PL]
lev = None if get_lev is None else get_lev(pframe[TPL.DT].iloc[0])
ret = return_on_initial_capital(cost, ppl, lev)
txnrets[ppl.index] = ret
txnrets.index = txnpl[TPL.DT]
crets = CumulativeRets(txnrets)
return Performance(crets)
class FixedAumRetCalculator(RetCalculator):
def __init__(self, aum, reset_freq='M'):
self.aum = aum
self.reset_freq = reset_freq
# capture what cash flows would be needed on reset date to reset the aum
self.external_cash_flows = None
def compute(self, txns):
ltd = txns.pl.ltd_txn
grouper = pd.TimeGrouper(self.reset_freq)
period_rets = pd.Series(np.nan, index=ltd.index)
aum = self.aum
at = 0
cf = OrderedDict()
for key, grp in ltd.groupby(grouper):
if grp.empty:
continue
eod = aum + grp
sod = eod.shift(1)
sod.iloc[0] = aum
period_rets.iloc[at:at + len(grp.index)] = eod / sod - 1.
at += len(grp.index)
# get aum back to fixed amount
cf[key] = eod.iloc[-1] - aum
self.external_cash_flows = pd.Series(cf)
crets = CumulativeRets(period_rets)
return Performance(crets)
class AumRetCalculator(RetCalculator):
def __init__(self, starting_aum, freq='M'):
self.starting_aum = starting_aum
self.freq = freq
self.txn_aum = None
def compute(self, txns):
ltd = txns.pl.ltd_txn
grouper = pd.TimeGrouper(self.freq)
period_rets = pd.Series(np.nan, index=ltd.index)
self.txn_aum = txn_aum = pd.Series(np.nan, index=ltd.index)
sop = self.starting_aum
at = 0
for key, grp in ltd.groupby(grouper):
if grp.empty:
continue
eod = sop + grp
sod = eod.shift(1)
sod.iloc[0] = sop
period_rets.iloc[at:at + len(grp.index)] = eod / sod - 1.
txn_aum.iloc[at:at + len(grp.index)] = sod
at += len(grp.index)
sop = eod.iloc[-1]
crets = CumulativeRets(period_rets)
return Performance(crets)
class CumulativeRets(object):
def __init__(self, rets=None, ltd_rets=None):
if rets is None and ltd_rets is None:
raise ValueError('rets or ltd_rets must be specified')
if rets is None:
if ltd_rets.empty:
rets = ltd_rets
else:
rets = (1. + ltd_rets).pct_change()
rets.iloc[0] = ltd_rets.iloc[0]
if ltd_rets is None:
if rets.empty:
ltd_rets = rets
else:
ltd_rets = (1. + rets).cumprod() - 1.
self.rets = rets
self.ltd_rets = ltd_rets
pds_per_year = property(lambda self: periodicity(self.rets))
def asfreq(self, freq):
other_pds_per_year = periodicity(freq)
if self.pds_per_year < other_pds_per_year:
msg = 'Cannot downsample returns. Cannot convert from %s periods/year to %s'
raise ValueError(msg % (self.pds_per_year, other_pds_per_year))
if freq == 'B':
rets = (1. + self.rets).groupby(self.rets.index.date).apply(lambda s: s.prod()) - 1.
# If you do not do this, it will be an object index
rets.index = pd.DatetimeIndex([i for i in rets.index])
return CumulativeRets(rets)
else:
rets = (1. + self.rets).resample(freq, how='prod') - 1.
return CumulativeRets(rets)
# -----------------------------------------------------------
# Resampled data
dly = lazy_property(lambda self: self.asfreq('B'), 'dly')
weekly = lazy_property(lambda self: self.asfreq('W'), 'weekly')
monthly = lazy_property(lambda self: self.asfreq('M'), 'monthly')
quarterly = lazy_property(lambda self: self.asfreq('Q'), 'quarterly')
annual = lazy_property(lambda self: self.asfreq('A'), 'annual')
# -----------------------------------------------------------
# Basic Metrics
@lazy_property
def ltd_rets_ann(self):
return (1. + self.ltd_rets) ** (self.pds_per_year / pd.expanding_count(self.rets)) - 1.
cnt = property(lambda self: self.rets.notnull().astype(int).sum())
mean = lazy_property(lambda self: self.rets.mean(), 'avg')
mean_ann = lazy_property(lambda self: self.mean * self.pds_per_year, 'avg_ann')
ltd = lazy_property(lambda self: self.ltd_rets.iloc[-1], name='ltd')
ltd_ann = lazy_property(lambda self: self.ltd_rets_ann.iloc[-1], name='ltd_ann')
std = lazy_property(lambda self: self.rets.std(), 'std')
std_ann = lazy_property(lambda self: self.std * np.sqrt(self.pds_per_year), 'std_ann')
drawdown_info = lazy_property(lambda self: drawdown_info(self.rets), 'drawdown_info')
drawdowns = lazy_property(lambda self: drawdowns(self.rets), 'drawdowns')
maxdd = lazy_property(lambda self: self.drawdown_info['maxdd'].min(), 'maxdd')
dd_avg = lazy_property(lambda self: self.drawdown_info['maxdd'].mean(), 'dd_avg')
kurtosis = lazy_property(lambda self: self.rets.kurtosis(), 'kurtosis')
skew = lazy_property(lambda self: self.rets.skew(), 'skew')
sharpe_ann = lazy_property(lambda self: np.divide(self.ltd_ann, self.std_ann), 'sharpe_ann')
downside_deviation = lazy_property(lambda self: downside_deviation(self.rets, mar=0, full=0, ann=1),
'downside_deviation')
sortino = lazy_property(lambda self: self.ltd_ann / self.downside_deviation, 'sortino')
@lazy_property
def maxdd_dt(self):
ddinfo = self.drawdown_info
if ddinfo.empty:
return None
else:
return self.drawdown_info['maxdd dt'].loc[self.drawdown_info['maxdd'].idxmin()]
# -----------------------------------------------------------
# Expanding metrics
expanding_mean = property(lambda self: pd.expanding_mean(self.rets), 'expanding_avg')
expanding_mean_ann = property(lambda self: self.expanding_mean * self.pds_per_year, 'expanding_avg_ann')
expanding_std = lazy_property(lambda self: pd.expanding_std(self.rets), 'expanding_std')
expanding_std_ann = lazy_property(lambda self: self.expanding_std * np.sqrt(self.pds_per_year), 'expanding_std_ann')
expanding_sharpe_ann = property(lambda self: np.divide(self.ltd_rets_ann, self.expanding_std_ann))
# -----------------------------------------------------------
# Rolling metrics
rolling_mean = property(lambda self: pd.rolling_mean(self.rets), 'rolling_avg')
rolling_mean_ann = property(lambda self: self.rolling_mean * self.pds_per_year, 'rolling_avg_ann')
def rolling_ltd_rets(self, n):
return pd.rolling_apply(self.rets, n, lambda s: (1. + s).prod() - 1.)
def rolling_ltd_rets_ann(self, n):
tot = self.rolling_ltd_rets(n)
return tot ** (self.pds_per_year / n)
def rolling_std(self, n):
return pd.rolling_std(self.rets, n)
def rolling_std_ann(self, n):
return self.rolling_std(n) * np.sqrt(self.pds_per_year)
def rolling_sharpe_ann(self, n):
return self.rolling_ltd_rets_ann(n) / self.rolling_std_ann(n)
def iter_by_year(self):
"""Split the return objects by year and iterate"""
for key, grp in self.rets.groupby(lambda x: x.year):
yield key, CumulativeRets(rets=grp)
def truncate(self, before=None, after=None):
rets = self.rets.truncate(before=before, after=after)
return CumulativeRets(rets=rets)
@lazy_property
def summary(self):
d = OrderedDict()
d['ltd'] = self.ltd
d['ltd ann'] = self.ltd_ann
d['mean'] = self.mean
d['mean ann'] = self.mean_ann
d['std'] = self.std
d['std ann'] = self.std_ann
d['sharpe ann'] = self.sharpe_ann
d['sortino'] = self.sortino
d['maxdd'] = self.maxdd
d['maxdd dt'] = self.maxdd_dt
d['dd avg'] = self.dd_avg
d['cnt'] = self.cnt
return pd.Series(d, name=self.rets.index.freq or guess_freq(self.rets.index))
def _repr_html_(self):
from tia.util.fmt import new_dynamic_formatter
fmt = new_dynamic_formatter(method='row', precision=2, pcts=1, trunc_dot_zeros=1, parens=1)
df = self.summary.to_frame()
return fmt(df)._repr_html_()
def get_alpha_beta(self, bm_rets):
if isinstance(bm_rets, pd.Series):
bm = CumulativeRets(bm_rets)
elif isinstance(bm_rets, CumulativeRets):
bm = bm_rets
else:
raise ValueError('bm_rets must be series or CumulativeRetPerformace not %s' % (type(bm_rets)))
bm_freq = guess_freq(bm_rets)
if self.pds_per_year != bm.pds_per_year:
tgt = {'B': 'dly', 'W': 'weekly', 'M': 'monthly', 'Q': 'quarterly', 'A': 'annual'}.get(bm_freq, None)
if tgt is None:
raise ValueError('No mapping for handling benchmark with frequency: %s' % bm_freq)
tmp = getattr(self, tgt)
y = tmp.rets
y_ann = tmp.ltd_ann
else:
y = self.rets
y_ann = self.ltd_ann
x = bm.rets.truncate(y.index[0], y.index[-1])
x_ann = bm.ltd_ann
model = pd.ols(x=x, y=y)
beta = model.beta[0]
alpha = y_ann - beta * x_ann
return pd.Series({'alpha': alpha, 'beta': beta}, name=bm_freq)
def plot_ltd(self, ax=None, style='k', label='ltd', show_dd=1, title=True, legend=1):
ltd = self.ltd_rets
ax = ltd.plot(ax=ax, style=style, label=label)
if show_dd:
dd = self.drawdowns
dd.plot(style='r', label='drawdowns', alpha=.5, ax=ax)
ax.fill_between(dd.index, 0, dd.values, facecolor='red', alpha=.25)
fmt = PercentFormatter
AxesFormat().Y.percent().X.label("").apply(ax)
legend and ax.legend(loc='upper left', prop={'size': 12})
# show the actualy date and value
mdt, mdd = self.maxdd_dt, self.maxdd
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.25)
try:
dtstr = '{0}'.format(mdt.to_period())
except:
# assume daily
dtstr = '{0}'.format(hasattr(mdt, 'date') and mdt.date() or mdt)
ax.text(mdt, dd[mdt], "{1} \n {0}".format(fmt(mdd), dtstr).strip(), ha="center", va="top", size=8,
bbox=bbox_props)
if title is True:
pf = new_percent_formatter(1, parens=False, trunc_dot_zeros=True)
ff = new_float_formatter(precision=1, parens=False, trunc_dot_zeros=True)
total = pf(self.ltd_ann)
vol = pf(self.std_ann)
sh = ff(self.sharpe_ann)
mdd = pf(self.maxdd)
title = 'ret$\mathregular{_{ann}}$ %s vol$\mathregular{_{ann}}$ %s sharpe %s maxdd %s' % (
total, vol, sh, mdd)
title and ax.set_title(title, fontdict=dict(fontsize=10, fontweight='bold'))
return ax
def plot_ret_on_dollar(self, title=None, show_maxdd=1, figsize=None, ax=None, append=0, label=None, **plot_args):
plot_return_on_dollar(self.rets, title=title, show_maxdd=show_maxdd, figsize=figsize, ax=ax, append=append,
label=label, **plot_args)
def plot_hist(self, ax=None, **histplot_kwargs):
pf = new_percent_formatter(precision=1, parens=False, trunc_dot_zeros=1)
ff = new_float_formatter(precision=1, parens=False, trunc_dot_zeros=1)
ax = self.rets.hist(ax=ax, **histplot_kwargs)
AxesFormat().X.percent(1).apply(ax)
m, s, sk, ku = pf(self.mean), pf(self.std), ff(self.skew), ff(self.kurtosis)
txt = '$\mathregular{\mu}$=%s $\mathregular{\sigma}$=%s skew=%s kurt=%s' % (m, s, sk, ku)
bbox = dict(facecolor='white', alpha=0.5)
ax.text(0, 1, txt, fontdict={'fontweight': 'bold'}, bbox=bbox, ha='left', va='top', transform=ax.transAxes)
return ax
def filter(self, mask, keep_ltd=0):
if isinstance(mask, pd.Series):
mask = mask.values
rets = self.rets.loc[mask]
ltd = None
if keep_ltd:
ltd = self.ltd_rets.loc[mask]
return CumulativeRets(rets=rets, ltd_rets=ltd)
class Performance(object):
def __init__(self, txn_rets):
if isinstance(txn_rets, pd.Series):
txn_rets = CumulativeRets(txn_rets)
self.txn_details = txn_rets
txn = property(lambda self: self.txn_details.rets)
ltd_txn = property(lambda self: self.txn_details.ltd_rets)
dly_details = lazy_property(lambda self: self.txn_details.dly, 'dly_details')
dly = property(lambda self: self.dly_details.rets)
ltd_dly = property(lambda self: self.dly_details.ltd_rets)
ltd_dly_ann = property(lambda self: self.dly_details.ltd_rets_ann)
weekly_details = lazy_property(lambda self: self.txn_details.weekly, 'weekly_details')
weekly = property(lambda self: self.weekly_details.rets)
ltd_weekly = property(lambda self: self.weekly_details.ltd_rets)
ltd_weekly_ann = property(lambda self: self.weekly_details.ltd_rets_ann)
monthly_details = lazy_property(lambda self: self.txn_details.monthly, 'monthly_details')
monthly = property(lambda self: self.monthly_details.rets)
ltd_monthly = property(lambda self: self.monthly_details.ltd_rets)
ltd_monthly_ann = property(lambda self: self.monthly_details.ltd_rets_ann)
quarterly_details = lazy_property(lambda self: self.txn_details.quarterly, 'quarterly_details')
quarterly = property(lambda self: self.quarterly_details.rets)
ltd_quarterly = property(lambda self: self.quarterly_details.ltd_rets)
ltd_quarterly_ann = property(lambda self: self.quarterly_details.ltd_rets_ann)
annual_details = lazy_property(lambda self: self.txn_details.annual, 'annual_details')
annual = property(lambda self: self.annual_details.rets)
ltd_annual = property(lambda self: self.annual_details.ltd_rets)
ltd_annual_ann = property(lambda self: self.annual_details.ltd_rets_ann)
def iter_by_year(self):
"""Split the return objects by year and iterate"""
for yr, details in self.txn_details.iter_by_year():
yield yr, Performance(details)
def filter(self, txn_mask):
details = self.txn_details.filter(txn_mask)
return Performance(details)
def truncate(self, before=None, after=None):
details = self.txn_details.truncate(before, after)
return Performance(details)
def report_by_year(self, summary_fct=None, years=None, ltd=1, prior_n_yrs=None, first_n_yrs=None, ranges=None,
bm_rets=None):
"""Summary the returns
:param summary_fct: function(Rets) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_years: integer or list. Include summary for N years of return data prior to end date
:param first_n_years: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param dm_dly_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
"""
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(performance):
monthly = performance.monthly_details
dly = performance.dly_details
data = OrderedDict()
data['ltd ann'] = monthly.ltd_ann
data['mret avg'] = monthly.mean
data['mret std ann'] = monthly.std_ann
data['sharpe ann'] = monthly.sharpe_ann
data['sortino'] = monthly.sortino
data['maxdd'] = dly.maxdd
data['maxdd dt'] = dly.maxdd_dt
if bm_rets is not None:
abseries = performance.get_alpha_beta(bm_rets)
prefix = {'weekly': 'wkly ', 'monthly': 'mret '}.get(abseries.name, abseries.name)
data['{0}beta'.format(prefix)] = abseries['beta']
data['{0}alpha'.format(prefix)] = abseries['alpha']
data['avg dd'] = dly.dd_avg
data['best month'] = monthly.rets.max()
data['worst month'] = monthly.rets.min()
data['nmonths'] = monthly.cnt
return data
results = OrderedDict()
if years is not False:
for yr, robj in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(robj)
# First n years
if first_n_yrs:
first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
for first in first_n_yrs:
after = '12/31/%s' % (self.dly.index[0].year + first)
firstN = self.truncate(after=after)
results['first {0}yrs'.format(first)] = summary_fct(firstN)
# Ranges
if ranges:
for range in ranges:
yr_start, yr_end = range
rng_rets = self.truncate('1/1/%s' % yr_start, '12/31/%s' % yr_end)
results['{0}-{1}'.format(yr_start, yr_end)] = summary_fct(rng_rets)
# Prior n years
if prior_n_yrs:
prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
for prior in prior_n_yrs:
before = '1/1/%s' % (self.dly.index[-1].year - prior)
priorN = self.truncate(before)
results['past {0}yrs'.format(prior)] = summary_fct(priorN)
# LTD
if ltd:
results['ltd'] = summary_fct(self)
return pd.DataFrame(results, index=list(results.values())[0].keys()).T
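# --- Hedged usage sketch (added for illustration; not part of the original module).
# It assumes the rest of this file (lazy_property, guess_freq, the CumulativeRets
# resampling properties) is in scope and that a pandas build compatible with this
# library's legacy API (pd.ols, pd.rolling_mean) is installed. The return series
# below is made up.
import numpy as np
import pandas as pd

np.random.seed(0)
idx = pd.bdate_range('2015-01-01', '2017-12-29')
daily_rets = pd.Series(np.random.normal(0.0004, 0.01, len(idx)), index=idx)

perf = Performance(daily_rets)        # wraps the series in CumulativeRets
print(perf.monthly_details.summary)   # ltd, annualised mean/std, sharpe, maxdd, ...
print(perf.report_by_year(ltd=1))     # one summary row per calendar year plus 'ltd'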
| 10,461
| 8,768
| 270
|
90edd1b0fcff1bcb117d544390d61e218a49058d
| 158
|
py
|
Python
|
lib/solutions/HLO/hello_solution.py
|
DPNT-Sourcecode/FIZ-rsof01
|
a1820f2122c122dbf574077f08014967f83fbd9b
|
[
"Apache-2.0"
] | null | null | null |
lib/solutions/HLO/hello_solution.py
|
DPNT-Sourcecode/FIZ-rsof01
|
a1820f2122c122dbf574077f08014967f83fbd9b
|
[
"Apache-2.0"
] | null | null | null |
lib/solutions/HLO/hello_solution.py
|
DPNT-Sourcecode/FIZ-rsof01
|
a1820f2122c122dbf574077f08014967f83fbd9b
|
[
"Apache-2.0"
] | null | null | null |
# noinspection PyUnusedLocal
# friend_name = unicode string
#print(hello("Mike"))
| 15.8
| 39
| 0.664557
|
# noinspection PyUnusedLocal
# friend_name = unicode string
def hello(friend_name):
return("Hello, %s!" %friend_name )
#print(hello("Mike"))
| 42
| 0
| 25
|
bd98f8d6beada389f1d1528af29830037c5efe1e
| 3,535
|
py
|
Python
|
sketch/srft.py
|
wangshusen/PyRLA
|
066876545c8501dca8ec857676465553a0ebb822
|
[
"MIT"
] | 12
|
2018-06-15T09:49:36.000Z
|
2020-05-08T12:42:06.000Z
|
sketch/srft.py
|
wangshusen/PyRLA
|
066876545c8501dca8ec857676465553a0ebb822
|
[
"MIT"
] | 1
|
2020-06-09T11:46:05.000Z
|
2020-06-09T12:24:59.000Z
|
sketch/srft.py
|
wangshusen/PyRLA
|
066876545c8501dca8ec857676465553a0ebb822
|
[
"MIT"
] | 3
|
2018-11-05T19:14:21.000Z
|
2019-10-23T02:41:10.000Z
|
import numpy
# Remark:
# Real FFT with even n is faster than real FFT with odd n.
# I do not know why.
def realfft_col(a_mat):
'''
Real Fast Fourier Transform (FFT) Independently Applied to Each Column of A
Input
a_mat: n-by-d dense NumPy matrix.
Output
c_mat: n-by-d matrix C = F * A.
Here F is the n-by-n orthogonal real FFT matrix (not explicitly formed)
Notice that $C^T * C = A^T * A$;
however, $C * C^T = A * A^T$ is not true.
'''
n_int = a_mat.shape[0]
fft_mat = numpy.fft.fft(a_mat, n=None, axis=0) / numpy.sqrt(n_int)
if n_int % 2 == 1:
cutoff_int = int((n_int+1) / 2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int, n_int))
else:
cutoff_int = int(n_int/2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int+1, n_int))
c_mat = fft_mat.real
c_mat[idx_real_vec, :] *= numpy.sqrt(2)
c_mat[idx_imag_vec, :] = fft_mat[idx_imag_vec, :].imag * numpy.sqrt(2)
return c_mat
def realfft_row(a_mat):
'''
Real Fast Fourier Transform (FFT) Independently Applied to Each Row of A
Input
a_mat: m-by-n dense NumPy matrix.
Output
c_mat: m-by-n matrix C = A * F.
Here F is the n-by-n orthogonal real FFT matrix (not explicitly formed)
Notice that $C * C^T = A * A^T$;
however, $C^T * C = A^T * A$ is not true.
'''
n_int = a_mat.shape[1]
fft_mat = numpy.fft.fft(a_mat, n=None, axis=1) / numpy.sqrt(n_int)
if n_int % 2 == 1:
cutoff_int = int((n_int+1) / 2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int, n_int))
else:
cutoff_int = int(n_int/2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int+1, n_int))
c_mat = fft_mat.real
c_mat[:, idx_real_vec] *= numpy.sqrt(2)
c_mat[:, idx_imag_vec] = fft_mat[:, idx_imag_vec].imag * numpy.sqrt(2)
return c_mat
def srft(a_mat, s_int):
'''
Subsampled Randomized Fourier Transform (SRFT) for Dense Matrix
Input
a_mat: m-by-n dense NumPy matrix;
s_int: sketch size.
Output
c_mat: m-by-s sketch C = A * S.
Here S is the sketching matrix (not explicitly formed)
'''
n_int = a_mat.shape[1]
sign_vec = numpy.random.choice(2, n_int) * 2 - 1
idx_vec = numpy.random.choice(n_int, s_int, replace=False)
a_mat = a_mat * sign_vec.reshape(1, n_int)
a_mat = realfft_row(a_mat)
c_mat = a_mat[:, idx_vec] * numpy.sqrt(n_int / s_int)
return c_mat
def srft2(a_mat, b_mat, s_int):
'''
Subsampled Randomized Fourier Transform (SRFT) for Dense Matrix
Input
a_mat: m-by-n dense NumPy matrix;
b_mat: d-by-n dense NumPy matrix;
s_int: sketch size.
Output
c_mat: m-by-s sketch C = A * S;
d_mat: d-by-s sketch D = B * S.
Here S is the sketching matrix (not explicitly formed)
'''
n_int = a_mat.shape[1]
sign_vec = numpy.random.choice(2, n_int) * 2 - 1
idx_vec = numpy.random.choice(n_int, s_int, replace=False)
a_mat = a_mat * sign_vec.reshape(1, n_int)
a_mat = realfft_row(a_mat)
c_mat = a_mat[:, idx_vec] * numpy.sqrt(n_int / s_int)
b_mat = b_mat * sign_vec.reshape(1, n_int)
b_mat = realfft_row(b_mat)
d_mat = b_mat[:, idx_vec] * numpy.sqrt(n_int / s_int)
return c_mat, d_mat
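# --- Hedged sanity check (added for illustration; not part of the original module):
# the sketch C = srft(A, s) should roughly preserve the Gram matrix, i.e.
# C @ C.T ~ A @ A.T, as the docstrings above state. Uses the module's existing
# numpy import; the sizes are my own choices.
numpy.random.seed(0)
a_mat = numpy.random.randn(50, 4096)
exact = a_mat @ a_mat.T
c_mat = srft(a_mat, 1024)
approx = c_mat @ c_mat.T
rel_err = numpy.linalg.norm(exact - approx) / numpy.linalg.norm(exact)
print('relative Gram-matrix error:', rel_err)   # small, but this is a rough check, not a bound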
| 31.008772
| 79
| 0.604809
|
import numpy
# Remark:
# Real FFT with even n is faster than real FFT with odd n.
# I do not know why.
def realfft_col(a_mat):
'''
Real Fast Fourier Transform (FFT) Independently Applied to Each Column of A
Input
a_mat: n-by-d dense NumPy matrix.
Output
c_mat: n-by-d matrix C = F * A.
Here F is the n-by-n orthogonal real FFT matrix (not explicitly formed)
Notice that $C^T * C = A^T * A$;
however, $C * C^T = A * A^T$ is not true.
'''
n_int = a_mat.shape[0]
fft_mat = numpy.fft.fft(a_mat, n=None, axis=0) / numpy.sqrt(n_int)
if n_int % 2 == 1:
cutoff_int = int((n_int+1) / 2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int, n_int))
else:
cutoff_int = int(n_int/2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int+1, n_int))
c_mat = fft_mat.real
c_mat[idx_real_vec, :] *= numpy.sqrt(2)
c_mat[idx_imag_vec, :] = fft_mat[idx_imag_vec, :].imag * numpy.sqrt(2)
return c_mat
def realfft_row(a_mat):
'''
Real Fast Fourier Transform (FFT) Independently Applied to Each Row of A
Input
a_mat: m-by-n dense NumPy matrix.
Output
c_mat: m-by-n matrix C = A * F.
Here F is the n-by-n orthogonal real FFT matrix (not explicitly formed)
Notice that $C * C^T = A * A^T$;
however, $C^T * C = A^T * A$ is not true.
'''
n_int = a_mat.shape[1]
fft_mat = numpy.fft.fft(a_mat, n=None, axis=1) / numpy.sqrt(n_int)
if n_int % 2 == 1:
cutoff_int = int((n_int+1) / 2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int, n_int))
else:
cutoff_int = int(n_int/2)
idx_real_vec = list(range(1, cutoff_int))
idx_imag_vec = list(range(cutoff_int+1, n_int))
c_mat = fft_mat.real
c_mat[:, idx_real_vec] *= numpy.sqrt(2)
c_mat[:, idx_imag_vec] = fft_mat[:, idx_imag_vec].imag * numpy.sqrt(2)
return c_mat
def srft(a_mat, s_int):
'''
Subsampled Randomized Fourier Transform (SRFT) for Dense Matrix
Input
a_mat: m-by-n dense NumPy matrix;
s_int: sketch size.
Output
c_mat: m-by-s sketch C = A * S.
Here S is the sketching matrix (not explicitly formed)
'''
n_int = a_mat.shape[1]
sign_vec = numpy.random.choice(2, n_int) * 2 - 1
idx_vec = numpy.random.choice(n_int, s_int, replace=False)
a_mat = a_mat * sign_vec.reshape(1, n_int)
a_mat = realfft_row(a_mat)
c_mat = a_mat[:, idx_vec] * numpy.sqrt(n_int / s_int)
return c_mat
def srft2(a_mat, b_mat, s_int):
'''
Subsampled Randomized Fourier Transform (SRFT) for Dense Matrix
Input
a_mat: m-by-n dense NumPy matrix;
b_mat: d-by-n dense NumPy matrix;
s_int: sketch size.
Output
c_mat: m-by-s sketch C = A * S;
d_mat: d-by-s sketch D = B * S.
Here S is the sketching matrix (not explicitly formed)
'''
n_int = a_mat.shape[1]
sign_vec = numpy.random.choice(2, n_int) * 2 - 1
idx_vec = numpy.random.choice(n_int, s_int, replace=False)
a_mat = a_mat * sign_vec.reshape(1, n_int)
a_mat = realfft_row(a_mat)
c_mat = a_mat[:, idx_vec] * numpy.sqrt(n_int / s_int)
b_mat = b_mat * sign_vec.reshape(1, n_int)
b_mat = realfft_row(b_mat)
d_mat = b_mat[:, idx_vec] * numpy.sqrt(n_int / s_int)
return c_mat, d_mat
| 0
| 0
| 0
|
72f88fd1c8c07e12acd9c900a420a669aa067518
| 1,162
|
py
|
Python
|
ifsp2019/publication_plot.py
|
andrekorol/flare-hunter
|
530d29275429b934d0ee8a20e21ed3ccc514e40c
|
[
"MIT"
] | null | null | null |
ifsp2019/publication_plot.py
|
andrekorol/flare-hunter
|
530d29275429b934d0ee8a20e21ed3ccc514e40c
|
[
"MIT"
] | 1
|
2021-08-31T19:17:19.000Z
|
2021-08-31T19:17:19.000Z
|
ifsp2019/publication_plot.py
|
andrekorol/flare-hunter
|
530d29275429b934d0ee8a20e21ed3ccc514e40c
|
[
"MIT"
] | null | null | null |
from urldl import download
from pycallisto import fitsfile
callisto_archives = 'http://soleil80.cs.technik.fhnw.ch/' \
'solarradio/data/2002-20yy_Callisto/'
filelist = [
"BLEN7M_20110216_133009_24.fit.gz", "BLEN7M_20110216_134510_24.fit.gz",
"BLEN7M_20110216_140011_24.fit.gz", "BLEN7M_20110216_141512_24.fit.gz",
"BLEN7M_20110216_143014_24.fit.gz", "BLEN7M_20110216_144515_24.fit.gz",
"BLEN7M_20110216_150016_24.fit.gz", "BLEN7M_20110216_151517_24.fit.gz",
"BLEN7M_20110216_153019_24.fit.gz"]
for filename in filelist:
fits_year = filename.split('_')[1][:4]
fits_month = filename.split('_')[1][4:6]
fits_day = filename.split('_')[1][-2:]
fits_url = f'{callisto_archives}/{fits_year}/{fits_month}/' \
f'{fits_day}/{filename}'
download(fits_url)
title = "Flare classe M1.6, 16/02/2011 (BLEN7M)"
plot_filename = "for_publication"
fitsfile.ECallistoFitsFile.plot_fits_files_list(filelist,
title=title,
plot_filename=plot_filename,
show=True)
| 41.5
| 76
| 0.634251
|
from urldl import download
from pycallisto import fitsfile
callisto_archives = 'http://soleil80.cs.technik.fhnw.ch/' \
'solarradio/data/2002-20yy_Callisto/'
filelist = [
"BLEN7M_20110216_133009_24.fit.gz", "BLEN7M_20110216_134510_24.fit.gz",
"BLEN7M_20110216_140011_24.fit.gz", "BLEN7M_20110216_141512_24.fit.gz",
"BLEN7M_20110216_143014_24.fit.gz", "BLEN7M_20110216_144515_24.fit.gz",
"BLEN7M_20110216_150016_24.fit.gz", "BLEN7M_20110216_151517_24.fit.gz",
"BLEN7M_20110216_153019_24.fit.gz"]
for filename in filelist:
fits_year = filename.split('_')[1][:4]
fits_month = filename.split('_')[1][4:6]
fits_day = filename.split('_')[1][-2:]
fits_url = f'{callisto_archives}/{fits_year}/{fits_month}/' \
f'{fits_day}/{filename}'
download(fits_url)
title = "Flare classe M1.6, 16/02/2011 (BLEN7M)"
plot_filename = "for_publication"
fitsfile.ECallistoFitsFile.plot_fits_files_list(filelist,
title=title,
plot_filename=plot_filename,
show=True)
| 0
| 0
| 0
|
81ae7822a3dffbe5464ea6afa6466f1447ad89c3
| 420
|
py
|
Python
|
script.py
|
aptmess/detectime
|
8e0eac3c93a984448731b3311741ee22a7e881f4
|
[
"MIT"
] | 2
|
2021-07-04T16:08:04.000Z
|
2021-08-03T08:42:03.000Z
|
script.py
|
aptmess/detectime
|
8e0eac3c93a984448731b3311741ee22a7e881f4
|
[
"MIT"
] | null | null | null |
script.py
|
aptmess/detectime
|
8e0eac3c93a984448731b3311741ee22a7e881f4
|
[
"MIT"
] | null | null | null |
import logging
import yaml
from detectime.detectime import detectron
from definitions import ROOT_DIR
from detectime.utils import convert_dict_to_tuple
log = logging.getLogger(__name__)
CONFIG_PATH = 'config.yml'
if __name__ == '__main__':
main()
| 20
| 51
| 0.747619
|
import logging
import yaml
from detectime.detectime import detectron
from definitions import ROOT_DIR
from detectime.utils import convert_dict_to_tuple
log = logging.getLogger(__name__)
CONFIG_PATH = 'config.yml'
def main():
with open(ROOT_DIR / CONFIG_PATH) as f:
data = yaml.safe_load(f)
config = convert_dict_to_tuple(dictionary=data)
detectron(config)
if __name__ == '__main__':
main()
| 141
| 0
| 23
|
3fe7567364e0b02ea873d5bd1fc31a5761c15b2e
| 2,090
|
py
|
Python
|
CloneWars.py
|
hallba/UnicornWrightFisher
|
9ed4cc5e21b47cee7f2c70dc8638d031169a4b9c
|
[
"MIT"
] | 4
|
2021-04-09T19:45:47.000Z
|
2021-04-29T11:04:19.000Z
|
CloneWars.py
|
hallba/UnicornWrightFisher
|
9ed4cc5e21b47cee7f2c70dc8638d031169a4b9c
|
[
"MIT"
] | null | null | null |
CloneWars.py
|
hallba/UnicornWrightFisher
|
9ed4cc5e21b47cee7f2c70dc8638d031169a4b9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Clone wars simulator.
Simulates the growth of clones in a 2D space.
Mutations are induced by button presses.
Currently untested.
"""
import splash
splash.splashScreen("CloneWars!",rotation=270)
import signal
import sys
import RPi.GPIO as GPIO
try:
import numpy as np
except ImportError:
import numpyReplace as np
from UnicornWF import UnicornSimulator
# Need to check the pin numbers
RED_BUTTON_GPIO = 21
BLUE_BUTTON_GPIO = 16
GREEN_BUTTON_GPIO = 12
BLACK_BUTTON_GPIO = 25
GPIO.setmode(GPIO.BCM)
buttons = [RED_BUTTON_GPIO, BLUE_BUTTON_GPIO, GREEN_BUTTON_GPIO, BLACK_BUTTON_GPIO]
class DecayMutation(UnicornSimulator):
"""Random mutation turns cells black"""
def mutate(self, colour=0):
"""Select a random cell and change fitness and colour to black."""
cell = np.random.randint(0, self.population)
self.fitness[cell] += np.random.normal(loc=self.advantage, scale=0.1)
if colour == None:
self.colour[cell] = self.mutantColour
else:
self.colour[cell] = colour
self.colourUpdate()
if __name__ == "__main__":
for BUTTON_GPIO in buttons:
GPIO.setup(BUTTON_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_UP)
grid = DecayMutation(16, 30, 0.1, advantage=0.1)
print("setup buttons")
GPIO.add_event_detect(RED_BUTTON_GPIO, GPIO.FALLING,
callback=redMutation, bouncetime=50)
GPIO.add_event_detect(BLUE_BUTTON_GPIO, GPIO.FALLING,
callback=blueMutation, bouncetime=50)
GPIO.add_event_detect(GREEN_BUTTON_GPIO, GPIO.FALLING,
callback=greenMutation, bouncetime=50)
GPIO.add_event_detect(BLACK_BUTTON_GPIO, GPIO.FALLING,
callback=blackMutation, bouncetime=50)
print("enter loop")
grid.runAndProject()
GPIO.cleanup()
| 29.027778
| 83
| 0.688517
|
#!/usr/bin/env python
"""Clone wars simulator.
Simulates the growth of clones in a 2D space.
Mutations are induced by button presses.
Currently untested.
"""
import splash
splash.splashScreen("CloneWars!",rotation=270)
import signal
import sys
import RPi.GPIO as GPIO
try:
import numpy as np
except ImportError:
import numpyReplace as np
from UnicornWF import UnicornSimulator
# Need to check the pin numbers
RED_BUTTON_GPIO = 21
BLUE_BUTTON_GPIO = 16
GREEN_BUTTON_GPIO = 12
BLACK_BUTTON_GPIO = 25
GPIO.setmode(GPIO.BCM)
buttons = [RED_BUTTON_GPIO, BLUE_BUTTON_GPIO, GREEN_BUTTON_GPIO, BLACK_BUTTON_GPIO]
class DecayMutation(UnicornSimulator):
"""Random mutation turns cells black"""
def mutate(self, colour=0):
"""Select a random cell and change fitness and colour to black."""
cell = np.random.randint(0, self.population)
self.fitness[cell] += np.random.normal(loc=self.advantage, scale=0.1)
if colour == None:
self.colour[cell] = self.mutantColour
else:
self.colour[cell] = colour
self.colourUpdate()
if __name__ == "__main__":
for BUTTON_GPIO in buttons:
GPIO.setup(BUTTON_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_UP)
grid = DecayMutation(16, 30, 0.1, advantage=0.1)
def redMutation(channel):
print("red")
grid.mutate(1)
def blueMutation(channel):
print("blue")
grid.mutate(3)
def greenMutation(channel):
grid.mutate(2)
def blackMutation(channel):
grid.mutate(1)
print("setup buttons")
GPIO.add_event_detect(RED_BUTTON_GPIO, GPIO.FALLING,
callback=redMutation, bouncetime=50)
GPIO.add_event_detect(BLUE_BUTTON_GPIO, GPIO.FALLING,
callback=blueMutation, bouncetime=50)
GPIO.add_event_detect(GREEN_BUTTON_GPIO, GPIO.FALLING,
callback=greenMutation, bouncetime=50)
GPIO.add_event_detect(BLACK_BUTTON_GPIO, GPIO.FALLING,
callback=blackMutation, bouncetime=50)
print("enter loop")
grid.runAndProject()
GPIO.cleanup()
| 156
| 0
| 104
|
0d008bce694ff1d6c230937c72eb568aa96b7de2
| 3,146
|
py
|
Python
|
policy/migrations/0001_initial.py
|
agnihotri7/demo-api
|
ffccd7e7a21b99cb8282045b4c3343ff5888c527
|
[
"RSA-MD"
] | null | null | null |
policy/migrations/0001_initial.py
|
agnihotri7/demo-api
|
ffccd7e7a21b99cb8282045b4c3343ff5888c527
|
[
"RSA-MD"
] | null | null | null |
policy/migrations/0001_initial.py
|
agnihotri7/demo-api
|
ffccd7e7a21b99cb8282045b4c3343ff5888c527
|
[
"RSA-MD"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-06 09:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 47.666667
| 219
| 0.579148
|
# Generated by Django 4.0.2 on 2022-02-06 09:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Quote',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quote_type', models.CharField(max_length=200, verbose_name='Type')),
('status', models.CharField(choices=[('quoted', 'Quoted'), ('accepted', 'Accepted'), ('paid', 'Payment'), ('activated', 'Activated'), ('cancelled', 'Cancelled'), ('expired', 'Expired')], max_length=30)),
('sum_insured', models.FloatField(verbose_name='Sum insured in Policy')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Quote',
'verbose_name_plural': 'Quotes',
'db_table': 'quote',
},
),
migrations.CreateModel(
name='UserPolicyHistory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('quoted', 'Quoted'), ('accepted', 'Accepted'), ('paid', 'Payment'), ('activated', 'Activated'), ('cancelled', 'Cancelled'), ('expired', 'Expired')], max_length=30)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('quote', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='policy.quote')),
],
options={
'verbose_name': 'Policy History',
'verbose_name_plural': 'Policies Histories',
'db_table': 'policy_history',
},
),
migrations.CreateModel(
name='Policy',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True, verbose_name='active')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('quote', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='policy.quote')),
],
options={
'verbose_name': 'Policy',
'verbose_name_plural': 'Policies',
'db_table': 'policy',
},
),
]
| 0
| 2,966
| 23
|
3b6a9325a976409cce0829d84577cbe4814d5ff2
| 2,660
|
py
|
Python
|
ann/neuralnetwork.py
|
manoloesparta/logicnet
|
97c6ee9be9bce9060c9cc4b51d0344df246500f9
|
[
"MIT"
] | 2
|
2020-07-15T01:41:41.000Z
|
2020-12-10T03:19:40.000Z
|
ann/neuralnetwork.py
|
manoloesparta/logicnet
|
97c6ee9be9bce9060c9cc4b51d0344df246500f9
|
[
"MIT"
] | null | null | null |
ann/neuralnetwork.py
|
manoloesparta/logicnet
|
97c6ee9be9bce9060c9cc4b51d0344df246500f9
|
[
"MIT"
] | null | null | null |
import numpy as np
| 45.862069
| 113
| 0.687594
|
import numpy as np
class NeuralNetwork:
def __init__(self, inputs_nodes, hidden_layer1, hidden_layer2, output_nodes, learning_rate):
self.inputs_nodes = inputs_nodes
self.hidden_layer1 = hidden_layer1
self.hidden_layer2 = hidden_layer2
self.output_nodes = output_nodes
self.learning_rate = learning_rate
self.weights_inputs_layer1 = 2 * np.random.random((self.inputs_nodes, self.hidden_layer1)) - 1
self.weights_layer1_layer2 = 2 * np.random.random((self.hidden_layer1,self.hidden_layer2)) - 1
self.weights_layer2_output = 2 * np.random.random((self.hidden_layer2,self.output_nodes)) - 1
def train(self, input_list, output_list, epochs):
for i in range(epochs):
inputs = np.array(input_list)
outputs = np.array(output_list)
layer1_output = NeuralNetwork.sigmoid(np.dot(inputs, self.weights_inputs_layer1))
layer2_output = NeuralNetwork.sigmoid(np.dot(layer1_output, self.weights_layer1_layer2))
layer3_output = NeuralNetwork.sigmoid(np.dot(layer2_output, self.weights_layer2_output))
layer3_error = outputs - layer3_output
layer3_delta = self.learning_rate * (layer3_error * NeuralNetwork.sigmoid(layer3_output, deriv=True))
layer2_error = np.dot(layer3_delta, self.weights_layer2_output.T)
layer2_delta = self.learning_rate * (layer2_error * NeuralNetwork.sigmoid(layer2_output, deriv=True))
layer1_error = np.dot(layer2_delta, self.weights_layer1_layer2.T)
layer1_delta = self.learning_rate * (layer1_error * NeuralNetwork.sigmoid(layer1_output, deriv=True))
self.weights_layer2_output += layer2_output.T.dot(layer3_delta)
self.weights_layer1_layer2 += layer1_output.T.dot(layer2_delta)
self.weights_inputs_layer1 += inputs.T.dot(layer1_delta)
if (i % (epochs / 10)) == 0:
print("Error: {:.8f}".format(np.mean(np.abs(layer3_error))))
return 'trained'
def predict(self, input_data):
inputs = np.array(input_data)
layer1_output = NeuralNetwork.sigmoid(np.dot(inputs, self.weights_inputs_layer1))
layer2_output = NeuralNetwork.sigmoid(np.dot(layer1_output, self.weights_layer1_layer2))
layer3_output = NeuralNetwork.sigmoid(np.dot(layer2_output, self.weights_layer2_output))
print(layer3_output)
return layer3_output
@staticmethod
def sigmoid(x, deriv=False):
        if deriv:
            # every call site passes an already-activated (sigmoid) value,
            # so the derivative simplifies to x * (1 - x)
            return x * (1 - x)
return 1 / (1 + np.exp(-x))
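# --- Hedged usage sketch (added for illustration; not from the original repo):
# train the network above on the XOR truth table. Layer sizes and the learning
# rate are my own choices, so convergence speed will vary with the random init.
if __name__ == '__main__':
    xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    xor_outputs = [[0], [1], [1], [0]]
    net = NeuralNetwork(inputs_nodes=2, hidden_layer1=4, hidden_layer2=4,
                        output_nodes=1, learning_rate=0.5)
    net.train(xor_inputs, xor_outputs, epochs=10000)
    net.predict([[1, 0]])   # the printed prediction should move towards 1 as training converges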
| 2,493
| 126
| 23
|
17ff2db4a4ef5d618281004b11bf14de2106dc58
| 885
|
py
|
Python
|
lh/tdd.py
|
skyf0cker/Statistical_learning_method
|
8151f3b8595ac086f08d161dc0cb961946f4b7fc
|
[
"MIT"
] | 3
|
2019-03-25T14:15:30.000Z
|
2019-08-29T15:02:47.000Z
|
lh/tdd.py
|
skyf0cker/Statistical_learning_method
|
8151f3b8595ac086f08d161dc0cb961946f4b7fc
|
[
"MIT"
] | null | null | null |
lh/tdd.py
|
skyf0cker/Statistical_learning_method
|
8151f3b8595ac086f08d161dc0cb961946f4b7fc
|
[
"MIT"
] | null | null | null |
from EMAlgorithm import EmAlgorithm
import numpy as np
def create_data(mu0, sigma0, mu1, sigma1, alpha0, alpha1):
    '''
    Initialise the data set.
    Here the data set is faked with random draws from Gaussian distributions.
    :param mu0: mean of Gaussian 0
    :param sigma0: standard deviation of Gaussian 0
    :param mu1: mean of Gaussian 1
    :param sigma1: standard deviation of Gaussian 1
    :param alpha0: mixing coefficient of Gaussian 0
    :param alpha1: mixing coefficient of Gaussian 1
    :return: data mixing the two Gaussian distributions
    '''
    # the data set has a fixed length of 1000
    length = 1000
    # draw the first Gaussian component; its sample size is length * alpha0,
    # which is how the alpha coefficient takes effect
    data0 = np.random.normal(mu0, sigma0, int(length * alpha0))
    # data for the second Gaussian component
    data1 = np.random.normal(mu1, sigma1, int(length * alpha1))
    # initialise the combined data set;
    # the mixed samples from both Gaussians are returned in it
    dataSet = []
    # add the first component's samples
    dataSet.extend(data0)
    # add the second component's samples
    dataSet.extend(data1)
    # return the faked data set
return dataSet
data = create_data(2, 2, 4, 2, 0.6, 0.4)
e = EmAlgorithm(data, 2)
e.train()
# a = e.compute_gama()
# e.update()
| 21.071429
| 63
| 0.656497
|
from EMAlgorithm import EmAlgorithm
import numpy as np
def create_data(mu0, sigma0, mu1, sigma1, alpha0, alpha1):
    '''
    Initialise the data set.
    Here the data set is faked with random draws from Gaussian distributions.
    :param mu0: mean of Gaussian 0
    :param sigma0: standard deviation of Gaussian 0
    :param mu1: mean of Gaussian 1
    :param sigma1: standard deviation of Gaussian 1
    :param alpha0: mixing coefficient of Gaussian 0
    :param alpha1: mixing coefficient of Gaussian 1
    :return: data mixing the two Gaussian distributions
    '''
    # the data set has a fixed length of 1000
    length = 1000
    # draw the first Gaussian component; its sample size is length * alpha0,
    # which is how the alpha coefficient takes effect
    data0 = np.random.normal(mu0, sigma0, int(length * alpha0))
    # data for the second Gaussian component
    data1 = np.random.normal(mu1, sigma1, int(length * alpha1))
    # initialise the combined data set;
    # the mixed samples from both Gaussians are returned in it
    dataSet = []
    # add the first component's samples
    dataSet.extend(data0)
    # add the second component's samples
    dataSet.extend(data1)
    # return the faked data set
return dataSet
data = create_data(2, 2, 4, 2, 0.6, 0.4)
e = EmAlgorithm(data, 2)
e.train()
# a = e.compute_gama()
# e.update()
| 0
| 0
| 0
|
df7dd05fd77b54c6bccf40f3142f23f5cd3af718
| 662
|
py
|
Python
|
fcos_core/data/datasets/create_eccv_index.py
|
touchylk/fcoseccv
|
f9141bf98ffed6bd1292779ac022742c15d4555d
|
[
"BSD-2-Clause"
] | null | null | null |
fcos_core/data/datasets/create_eccv_index.py
|
touchylk/fcoseccv
|
f9141bf98ffed6bd1292779ac022742c15d4555d
|
[
"BSD-2-Clause"
] | null | null | null |
fcos_core/data/datasets/create_eccv_index.py
|
touchylk/fcoseccv
|
f9141bf98ffed6bd1292779ac022742c15d4555d
|
[
"BSD-2-Clause"
] | 1
|
2020-10-04T13:23:33.000Z
|
2020-10-04T13:23:33.000Z
|
import os
xmldir = '/media/e813/E/dataset/eccv/eccv/VisDrone2018-VID-val/xmlannotations'
# datasetdir = '/media/e813/E/dataset/eccv/eccv/VisDrone2018-VID-train'
# file = os.path.join(datasetdir,'index.txt')
# f = open(file,'w')
count=0
for seq in os.listdir(xmldir):
seqpath = os.path.join(xmldir,seq)
for n,xml_name in enumerate(os.listdir(seqpath)):
count += 1
if n%4==0:
name = xml_name[:-4]
# f.write('{} {}\n'.format(seq,name))
print(count)
# f.close()
# with open(file) as f:
# xmls = f.readlines()
# xmls =[x.strip("\n") for x in xmls]
# xmls = [x.split(' ') for x in xmls]
# print(xmls[1:10])
| 30.090909
| 78
| 0.610272
|
import os
xmldir = '/media/e813/E/dataset/eccv/eccv/VisDrone2018-VID-val/xmlannotations'
# datasetdir = '/media/e813/E/dataset/eccv/eccv/VisDrone2018-VID-train'
# file = os.path.join(datasetdir,'index.txt')
# f = open(file,'w')
count=0
for seq in os.listdir(xmldir):
seqpath = os.path.join(xmldir,seq)
for n,xml_name in enumerate(os.listdir(seqpath)):
count += 1
if n%4==0:
name = xml_name[:-4]
# f.write('{} {}\n'.format(seq,name))
print(count)
# f.close()
# with open(file) as f:
# xmls = f.readlines()
# xmls =[x.strip("\n") for x in xmls]
# xmls = [x.split(' ') for x in xmls]
# print(xmls[1:10])
| 0
| 0
| 0
|
176c7ac46bee6e48a191724b39af0f412b02198f
| 3,298
|
py
|
Python
|
pepys_admin/maintenance/dialogs/add_dialog.py
|
debrief/pepys-import
|
12d29c0e0f69e1119400334983947893e7679b6b
|
[
"Apache-2.0"
] | 4
|
2021-05-14T08:22:47.000Z
|
2022-02-04T19:48:25.000Z
|
pepys_admin/maintenance/dialogs/add_dialog.py
|
debrief/pepys-import
|
12d29c0e0f69e1119400334983947893e7679b6b
|
[
"Apache-2.0"
] | 1,083
|
2019-11-06T17:01:07.000Z
|
2022-03-25T10:26:51.000Z
|
pepys_admin/maintenance/dialogs/add_dialog.py
|
debrief/pepys-import
|
12d29c0e0f69e1119400334983947893e7679b6b
|
[
"Apache-2.0"
] | 4
|
2019-11-06T12:00:45.000Z
|
2021-06-09T04:18:28.000Z
|
import textwrap
from asyncio import Future
from prompt_toolkit.layout.containers import HSplit
from prompt_toolkit.layout.dimension import D
from prompt_toolkit.widgets import Button, Label
from prompt_toolkit.widgets.dialogs import Dialog
from pepys_admin.maintenance.utils import get_system_name_mappings
from pepys_admin.maintenance.widgets.entry_edit_widget import EntryEditWidget
| 35.462366
| 97
| 0.650091
|
import textwrap
from asyncio import Future
from prompt_toolkit.layout.containers import HSplit
from prompt_toolkit.layout.dimension import D
from prompt_toolkit.widgets import Button, Label
from prompt_toolkit.widgets.dialogs import Dialog
from pepys_admin.maintenance.utils import get_system_name_mappings
from pepys_admin.maintenance.widgets.entry_edit_widget import EntryEditWidget
class AddDialog:
def __init__(self, edit_data, table_object):
"""
A dialog for adding entries to a table
        :param edit_data: The column_data dictionary for the given table object
        :type edit_data: dict
:param table_object: SQLAlchemy Table object, such as Platform, Sensor or Nationality
:type table_object: SQLAlchemy Table Object
"""
self.future = Future()
ok_button = Button(text="Add", handler=self.handle_ok)
cancel_button = Button(text="Cancel", handler=self.handle_cancel)
self.edit_data = edit_data
self.required_columns = set(
[value["system_name"] for key, value in self.edit_data.items() if value["required"]]
)
self.entry_edit_widget = EntryEditWidget(self.edit_data, show_required_fields=True)
self.error_message = Label("", style="class:error-message")
instructions = Label(
"Press TAB to move between fields. Required fields are marked with a *.",
style="class:instruction-text-dark",
)
self.body = HSplit([instructions, self.entry_edit_widget, self.error_message], padding=1)
self.dialog = Dialog(
title=f"Add {table_object.__name__}",
body=self.body,
buttons=[ok_button, cancel_button],
width=D(preferred=80),
modal=True,
)
# Get the keybindings for the dialog and add a binding for Esc
# to close the dialog
dialog_kb = self.dialog.container.container.content.key_bindings
@dialog_kb.add("escape")
def _(event) -> None:
self.handle_cancel()
def handle_ok(self):
try:
output = self.entry_edit_widget.output
except Exception:
self.error_message.text = "Error converting values, please edit and try again"
return
provided_cols = set(output.keys())
if self.required_columns.issubset(provided_cols):
# In this case, the user has entered values for all of the required columns
self.future.set_result(output)
else:
# In this case they haven't, so display a sensible error message
diff_list = self.required_columns.difference(provided_cols)
(
system_name_to_display_name,
_,
) = get_system_name_mappings(self.edit_data)
diff_list_display_names = sorted(
[system_name_to_display_name[sys_name] for sys_name in diff_list]
)
diff_list_str = ", ".join(diff_list_display_names)
self.error_message.text = textwrap.fill(
f"Some required values missing: {diff_list_str}", 70
)
def handle_cancel(self):
self.future.set_result(None)
def __pt_container__(self):
return self.dialog
| 1,180
| 1,707
| 23
|
bf0caeb72b9942c65f74bec1e761950a9b7f6e2d
| 624
|
py
|
Python
|
array/mergeTwoSortedArray.py
|
saai/LeetcodePythonSolutions
|
201f2054dda3f303ae6a376b40cbc7f98688322c
|
[
"MIT"
] | null | null | null |
array/mergeTwoSortedArray.py
|
saai/LeetcodePythonSolutions
|
201f2054dda3f303ae6a376b40cbc7f98688322c
|
[
"MIT"
] | null | null | null |
array/mergeTwoSortedArray.py
|
saai/LeetcodePythonSolutions
|
201f2054dda3f303ae6a376b40cbc7f98688322c
|
[
"MIT"
] | null | null | null |
# @param {integer[]} nums1
# @param {integer} m
# @param {integer[]} nums2
# @param {integer} n
# @return {void} Do not return anything, modify nums1 in-place instead.
| 28.363636
| 75
| 0.421474
|
class Solution:
# @param {integer[]} nums1
# @param {integer} m
# @param {integer[]} nums2
# @param {integer} n
# @return {void} Do not return anything, modify nums1 in-place instead.
def merge(self, nums1, m, nums2, n):
p1 = m-1
p2 = n-1
p3 = m+n-1
while(p1>=0 and p2>=0):
if nums1[p1]>nums2[p2]:
nums1[p3] = nums1[p1]
p1 -= 1
else:
nums1[p3] = nums2[p2]
p2 -= 1
p3 -= 1
while (p2>=0):
nums1[p3] = nums2[p2]
p3 -= 1
p2 -= 1
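# --- Hedged usage sketch (added for illustration; example values are my own):
# nums1 holds m real elements followed by padding slots for nums2's n elements,
# and merge() fills nums1 in place from the back.
nums1 = [1, 3, 5, 0, 0, 0]
nums2 = [2, 4, 6]
Solution().merge(nums1, 3, nums2, 3)
print(nums1)   # -> [1, 2, 3, 4, 5, 6]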
| 395
| -6
| 48
|
b9200d72886d859c7087fdff26f823fe6a74a941
| 4,928
|
py
|
Python
|
molecule/default/tests/test_default.py
|
chas0amx/ansible-postfix
|
b129c57fdddf00447a715cccea0758878de22d0b
|
[
"Apache-2.0"
] | 1
|
2022-02-28T10:22:07.000Z
|
2022-02-28T10:22:07.000Z
|
molecule/default/tests/test_default.py
|
chas0amx/ansible-postfix
|
b129c57fdddf00447a715cccea0758878de22d0b
|
[
"Apache-2.0"
] | 7
|
2021-11-18T07:25:50.000Z
|
2022-03-31T12:25:24.000Z
|
molecule/default/tests/test_default.py
|
chas0amx/ansible-postfix
|
b129c57fdddf00447a715cccea0758878de22d0b
|
[
"Apache-2.0"
] | 1
|
2022-03-02T10:17:23.000Z
|
2022-03-02T10:17:23.000Z
|
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
import json
import pytest
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.fixture()
def get_vars(host):
"""
"""
base_dir, molecule_dir = base_directory()
distribution = host.system_info.distribution
if distribution in ['debian', 'ubuntu']:
os = "debian"
elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']:
os = "redhat"
elif distribution in ['arch']:
os = "archlinux"
print(" -> {} / {}".format(distribution, os))
file_defaults = "file={}/defaults/main.yml name=role_defaults".format(base_dir)
file_vars = "file={}/vars/main.yml name=role_vars".format(base_dir)
file_molecule = "file={}/group_vars/all/vars.yml name=test_vars".format(molecule_dir)
file_distibution = "file={}/vars/{}.yml name=role_distibution".format(base_dir, os)
defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults")
vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution")
molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars")
ansible_vars = defaults_vars
ansible_vars.update(vars_vars)
ansible_vars.update(distibution_vars)
ansible_vars.update(molecule_vars)
templar = Templar(loader=DataLoader(), variables=ansible_vars)
result = templar.template(ansible_vars, fail_on_undefined=False)
return result
def test_directories(host, get_vars):
"""
used config directory
debian based: /etc/mysql
redhat based: /etc/my.cnf.d
arch based : /etc/my.cnf.d
"""
pp_json(get_vars)
directories = [
"/etc/postfix",
"/etc/postfix/maps.d",
"/etc/postfix/postfix-files.d",
"/etc/postfix/dynamicmaps.cf.d"
]
directories.append(get_vars.get("postfix_config_directory"))
for dirs in directories:
d = host.file(dirs)
assert d.is_directory
def test_files(host, get_vars):
"""
created config files
"""
files = [
"/etc/postfix/main.cf",
"/etc/postfix/master.cf",
"/etc/postfix/maps.d/generic",
"/etc/postfix/maps.d/header_checks",
"/etc/postfix/maps.d/sender_canonical_maps",
]
files.append(get_vars.get("postfix_mailname_file"))
files.append(get_vars.get("postfix_aliases_file"))
for _file in files:
f = host.file(_file)
assert f.is_file
def test_user(host, get_vars):
"""
created user
"""
shell = '/usr/sbin/nologin'
distribution = host.system_info.distribution
if distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']:
shell = "/sbin/nologin"
elif distribution == "arch":
shell = "/usr/bin/nologin"
user_name = "postfix"
u = host.user(user_name)
g = host.group(user_name)
assert g.exists
assert u.exists
assert user_name in u.groups
assert u.shell == shell
def test_service_running_and_enabled(host, get_vars):
"""
running service
"""
service_name = "postfix"
service = host.service(service_name)
assert service.is_running
assert service.is_enabled
def test_listening_socket(host, get_vars):
"""
"""
listening = host.socket.get_listening_sockets()
interfaces = host.interface.names()
eth = []
if "eth0" in interfaces:
eth = host.interface("eth0").addresses
for i in listening:
print(i)
for i in interfaces:
print(i)
for i in eth:
print(i)
distribution = host.system_info.distribution
release = host.system_info.release
bind_address = eth[0]
bind_port = 25
socket_name = "private/smtp"
listen = []
listen.append("tcp://{}:{}".format(bind_address, bind_port))
if not (distribution == 'ubuntu' and release == '18.04'):
listen.append("unix://{}".format(socket_name))
for spec in listen:
socket = host.socket(spec)
assert socket.is_listening
| 26.494624
| 114
| 0.656859
|
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
import json
import pytest
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def pp_json(json_thing, sort=True, indents=2):
if type(json_thing) is str:
print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
else:
print(json.dumps(json_thing, sort_keys=sort, indent=indents))
return None
def base_directory():
cwd = os.getcwd()
if('group_vars' in os.listdir(cwd)):
directory = "../.."
molecule_directory = "."
else:
directory = "."
molecule_directory = "molecule/{}".format(os.environ.get('MOLECULE_SCENARIO_NAME'))
return directory, molecule_directory
@pytest.fixture()
def get_vars(host):
"""
"""
base_dir, molecule_dir = base_directory()
distribution = host.system_info.distribution
if distribution in ['debian', 'ubuntu']:
os = "debian"
elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']:
os = "redhat"
elif distribution in ['arch']:
os = "archlinux"
print(" -> {} / {}".format(distribution, os))
file_defaults = "file={}/defaults/main.yml name=role_defaults".format(base_dir)
file_vars = "file={}/vars/main.yml name=role_vars".format(base_dir)
file_molecule = "file={}/group_vars/all/vars.yml name=test_vars".format(molecule_dir)
file_distibution = "file={}/vars/{}.yml name=role_distibution".format(base_dir, os)
defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults")
vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution")
molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars")
ansible_vars = defaults_vars
ansible_vars.update(vars_vars)
ansible_vars.update(distibution_vars)
ansible_vars.update(molecule_vars)
templar = Templar(loader=DataLoader(), variables=ansible_vars)
result = templar.template(ansible_vars, fail_on_undefined=False)
return result
def test_directories(host, get_vars):
"""
used config directory
debian based: /etc/mysql
redhat based: /etc/my.cnf.d
arch based : /etc/my.cnf.d
"""
pp_json(get_vars)
directories = [
"/etc/postfix",
"/etc/postfix/maps.d",
"/etc/postfix/postfix-files.d",
"/etc/postfix/dynamicmaps.cf.d"
]
directories.append(get_vars.get("postfix_config_directory"))
for dirs in directories:
d = host.file(dirs)
assert d.is_directory
def test_files(host, get_vars):
"""
created config files
"""
files = [
"/etc/postfix/main.cf",
"/etc/postfix/master.cf",
"/etc/postfix/maps.d/generic",
"/etc/postfix/maps.d/header_checks",
"/etc/postfix/maps.d/sender_canonical_maps",
]
files.append(get_vars.get("postfix_mailname_file"))
files.append(get_vars.get("postfix_aliases_file"))
for _file in files:
f = host.file(_file)
assert f.is_file
def test_user(host, get_vars):
"""
created user
"""
shell = '/usr/sbin/nologin'
distribution = host.system_info.distribution
if distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']:
shell = "/sbin/nologin"
elif distribution == "arch":
shell = "/usr/bin/nologin"
user_name = "postfix"
u = host.user(user_name)
g = host.group(user_name)
assert g.exists
assert u.exists
assert user_name in u.groups
assert u.shell == shell
def test_service_running_and_enabled(host, get_vars):
"""
running service
"""
service_name = "postfix"
service = host.service(service_name)
assert service.is_running
assert service.is_enabled
def test_listening_socket(host, get_vars):
"""
"""
listening = host.socket.get_listening_sockets()
interfaces = host.interface.names()
eth = []
if "eth0" in interfaces:
eth = host.interface("eth0").addresses
for i in listening:
print(i)
for i in interfaces:
print(i)
for i in eth:
print(i)
distribution = host.system_info.distribution
release = host.system_info.release
bind_address = eth[0]
bind_port = 25
socket_name = "private/smtp"
listen = []
listen.append("tcp://{}:{}".format(bind_address, bind_port))
if not (distribution == 'ubuntu' and release == '18.04'):
listen.append("unix://{}".format(socket_name))
for spec in listen:
socket = host.socket(spec)
assert socket.is_listening
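# --- Hedged illustration (added; not part of the original test module) of the
# Templar pattern used in get_vars() above: merged Ansible vars are rendered so
# that values may reference each other. The variable names below are made up.
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar

merged = {
    "postfix_config_directory": "/etc/postfix",
    "postfix_aliases_file": "{{ postfix_config_directory }}/aliases",
}
templar = Templar(loader=DataLoader(), variables=merged)
print(templar.template(merged, fail_on_undefined=False))
# -> {'postfix_config_directory': '/etc/postfix', 'postfix_aliases_file': '/etc/postfix/aliases'}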
| 528
| 0
| 46
|
c7055528d28431a2d229dc6240bb39f42df97a2f
| 42,845
|
py
|
Python
|
tests/pydevtest/test_chunkydevtest.py
|
PlantandFoodResearch/irods
|
9dfe7ffe5aa0760b7493bd9392ea1270df9335d4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/pydevtest/test_chunkydevtest.py
|
PlantandFoodResearch/irods
|
9dfe7ffe5aa0760b7493bd9392ea1270df9335d4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/pydevtest/test_chunkydevtest.py
|
PlantandFoodResearch/irods
|
9dfe7ffe5aa0760b7493bd9392ea1270df9335d4
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
if (sys.version_info >= (2,7)):
import unittest
else:
import unittest2 as unittest
import pydevtest_sessions as s
from pydevtest_common import assertiCmd, assertiCmdFail, interruptiCmd
from resource_suite import ResourceBase
import commands
import os, stat
import datetime
import time
import shutil
import random
| 49.474596
| 180
| 0.61328
|
import sys
if (sys.version_info >= (2,7)):
import unittest
else:
import unittest2 as unittest
import pydevtest_sessions as s
from pydevtest_common import assertiCmd, assertiCmdFail, interruptiCmd
from resource_suite import ResourceBase
import commands
import os, stat
import datetime
import time
import shutil
import random
class ChunkyDevTest(ResourceBase):
def test_beginning_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# test basic informational commands
assertiCmd(s.adminsession,"iinit -l", "LIST", s.adminsession.getUserName() )
assertiCmd(s.adminsession,"iinit -l", "LIST", s.adminsession.getZoneName() )
assertiCmd(s.adminsession,"iinit -l", "LIST", s.adminsession.getDefResource() )
res = s.adminsession.runCmd('ils', ['-V'])
assert (res[0].count('NOTICE: irodsHost') == 1
and res[0].count('NOTICE: irodsPort') == 1
and res[0].count('NOTICE: irodsDefResource') == 1)
# begin original devtest
assertiCmd(s.adminsession,"ilsresc", "LIST", self.testresc)
assertiCmd(s.adminsession,"ilsresc -l", "LIST", self.testresc)
assertiCmd(s.adminsession,"imiscsvrinfo", "LIST", ["relVersion"] )
assertiCmd(s.adminsession,"iuserinfo", "LIST", "name: "+username )
assertiCmd(s.adminsession,"ienv", "LIST", "irodsZone" )
assertiCmd(s.adminsession,"ipwd", "LIST", "home" )
assertiCmd(s.adminsession,"ihelp ils", "LIST", "ils" )
assertiCmd(s.adminsession,"ierror -14000", "LIST", "SYS_API_INPUT_ERR" )
assertiCmd(s.adminsession,"iexecmd hello", "LIST", "Hello world" )
assertiCmd(s.adminsession,"ips -v", "LIST", "ips" )
assertiCmd(s.adminsession,"iqstat", "LIST", "No delayed rules pending for user rods" )
# put and list basic file information
assertiCmd(s.adminsession,"ils -AL","LIST","home") # debug
assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"ichksum -f "+irodshome+"/icmdtest/foo1", "LIST", "performed = 1" )
assertiCmd(s.adminsession,"iput -kf "+progname+" "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"ils "+irodshome+"/icmdtest/foo1" , "LIST", "foo1" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", ["foo1",myssize] )
assertiCmd(s.adminsession,"iadmin ls "+irodshome+"/icmdtest", "LIST", "foo1" )
assertiCmd(s.adminsession,"ils -A "+irodshome+"/icmdtest/foo1", "LIST", username+"#"+irodszone+":own" )
assertiCmd(s.adminsession,"ichmod read "+testuser1+" "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"ils -A "+irodshome+"/icmdtest/foo1", "LIST", testuser1+"#"+irodszone+":read" )
# basic replica
assertiCmd(s.adminsession,"irepl -B -R "+self.testresc+" --rlock "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", self.testresc )
# overwrite a copy
assertiCmd(s.adminsession,"itrim -S "+irodsdefresource+" -N1 "+irodshome+"/icmdtest/foo1" )
assertiCmdFail(s.adminsession,"ils -L "+irodshome+"/icmdtest/foo1", "LIST", irodsdefresource )
assertiCmd(s.adminsession,"iphymv -R "+irodsdefresource+" "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", irodsdefresource[0:19] )
# basic metadata shuffle
assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest/foo1 testmeta1 180 cm" )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["testmeta1"] )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["180"] )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["cm"] )
assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
# new file mode check
assertiCmd(s.adminsession,"iget -fK --rlock "+irodshome+"/icmdtest/foo2 /tmp/" )
assert oct(stat.S_IMODE(os.stat("/tmp/foo2").st_mode)) == '0640'
os.unlink( "/tmp/foo2" )
assertiCmd(s.adminsession,"ils "+irodshome+"/icmdtest/foo2", "LIST", "foo2" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest/foo2 "+irodshome+"/icmdtest/foo4" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo4", "LIST", "foo4" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest/foo4 "+irodshome+"/icmdtest/foo2" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo2", "LIST", "foo2" )
assertiCmd(s.adminsession,"ichksum "+irodshome+"/icmdtest/foo2", "LIST", "foo2" )
assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest/foo2 testmeta1 180 cm" )
assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest/foo1 testmeta2 hello" )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["testmeta1"] )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["hello"] )
assertiCmd(s.adminsession,"imeta qu -d testmeta1 = 180", "LIST", "foo1" )
assertiCmd(s.adminsession,"imeta qu -d testmeta2 = hello", "LIST", "dataObj: foo1" )
assertiCmd(s.adminsession,"iget -f -K --rlock "+irodshome+"/icmdtest/foo2 "+dir_w )
assert myssize == str(os.stat(dir_w+"/foo2").st_size)
os.unlink( dir_w+"/foo2" )
# we have foo1 in $irodsdefresource and foo2 in testresource
# cleanup
os.unlink( sfile2 )
def test_iput_ibun_gzip_bzip2_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# make a directory containing 20 small files
if not os.path.isdir(mysdir):
os.mkdir(mysdir)
for i in range(20):
mysfile = mysdir+"/sfile"+str(i)
shutil.copyfile( progname, mysfile )
# we put foo1 in $irodsdefresource and foo2 in testresource
assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
assertiCmd(s.adminsession,"irepl -B -R "+self.testresc+" "+irodshome+"/icmdtest/foo1" )
phypath = dir_w+"/"+"foo1."+str(random.randrange(10000000))
assertiCmd(s.adminsession,"iput -kfR "+irodsdefresource+" "+sfile2+" "+irodshome+"/icmdtest/foo1" )
# show have 2 different copies
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", ["foo1",myssize] )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", ["foo1",str(os.stat(sfile2).st_size)] )
# update all old copies
assertiCmd(s.adminsession,"irepl -U "+irodshome+"/icmdtest/foo1" )
# make sure the old size is not there
assertiCmdFail(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", myssize )
assertiCmd(s.adminsession,"itrim -S "+irodsdefresource+" "+irodshome+"/icmdtest/foo1" )
# bulk test
assertiCmd(s.adminsession,"iput -bIvPKr "+mysdir+" "+irodshome+"/icmdtest", "LIST", "Bulk upload" )
# iput with a lot of options
rsfile = dir_w+"/rsfile"
if os.path.isfile( rsfile ):
os.unlink( rsfile )
assertiCmd(s.adminsession,"iput -PkITr -X "+rsfile+" --retries 10 "+mysdir+" "+irodshome+"/icmdtestw", "LIST", "Processing" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestw "+irodshome+"/icmdtestw1" )
assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestw1", "LIST", "sfile10" )
assertiCmd(s.adminsession,"ils -Ar "+irodshome+"/icmdtestw1", "LIST", "sfile10" )
assertiCmd(s.adminsession,"irm -rvf "+irodshome+"/icmdtestw1", "LIST", "num files done" )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
assertiCmd(s.adminsession,"iget -vIKPfr -X rsfile --retries 10 "+irodshome+"/icmdtest "+dir_w+"/testx", "LIST", "opened" )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
commands.getstatusoutput( "tar -chf "+dir_w+"/testx.tar -C "+dir_w+"/testx ." )
assertiCmd(s.adminsession,"iput "+dir_w+"/testx.tar "+irodshome+"/icmdtestx.tar" )
assertiCmd(s.adminsession,"ibun -x "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestx" )
assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestx", "LIST", ["foo2"] )
assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestx", "LIST", ["sfile10"] )
assertiCmd(s.adminsession,"ibun -cDtar "+irodshome+"/icmdtestx1.tar "+irodshome+"/icmdtestx" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtestx1.tar", "LIST", "testx1.tar" )
if os.path.exists(dir_w+"/testx1"):
shutil.rmtree(dir_w+"/testx1")
os.mkdir( dir_w+"/testx1" )
if os.path.isfile( dir_w+"/testx1.tar" ):
os.unlink( dir_w+"/testx1.tar" )
assertiCmd(s.adminsession,"iget "+irodshome+"/icmdtestx1.tar "+dir_w+"/testx1.tar" )
commands.getstatusoutput( "tar -xvf "+dir_w+"/testx1.tar -C "+dir_w+"/testx1" )
output = commands.getstatusoutput( "diff -r "+dir_w+"/testx "+dir_w+"/testx1/icmdtestx" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
# test ibun with gzip
assertiCmd(s.adminsession,"ibun -cDgzip "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestx" )
assertiCmd(s.adminsession,"ibun -x "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestgz")
if os.path.isfile( "icmdtestgz" ):
os.unlink( "icmdtestgz" )
assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtestgz "+dir_w+"", "LIST", "icmdtestgz")
output = commands.getstatusoutput( "diff -r "+dir_w+"/testx "+dir_w+"/icmdtestgz/icmdtestx" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
shutil.rmtree( dir_w+"/icmdtestgz")
assertiCmd(s.adminsession,"ibun --add "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestgz")
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestgz")
# test ibun with bzip2
assertiCmd(s.adminsession,"ibun -cDbzip2 "+irodshome+"/icmdtestx1.tar.bz2 "+irodshome+"/icmdtestx")
assertiCmd(s.adminsession,"ibun -xb "+irodshome+"/icmdtestx1.tar.bz2 "+irodshome+"/icmdtestbz2")
if os.path.isfile( "icmdtestbz2" ):
os.unlink( "icmdtestbz2" )
assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtestbz2 "+dir_w+"", "LIST", "icmdtestbz2")
output = commands.getstatusoutput( "diff -r "+dir_w+"/testx "+dir_w+"/icmdtestbz2/icmdtestx" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
shutil.rmtree( dir_w+"/icmdtestbz2" )
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestx1.tar.bz2")
assertiCmd(s.adminsession,"iphybun -R "+self.anotherresc+" -Dbzip2 "+irodshome+"/icmdtestbz2" )
assertiCmd(s.adminsession,"itrim -N1 -S "+self.testresc+" -r "+irodshome+"/icmdtestbz2", "LIST", "Total size trimmed" )
assertiCmd(s.adminsession,"itrim -N1 -S "+irodsdefresource+" -r "+irodshome+"/icmdtestbz2", "LIST", "Total size trimmed" )
# get the name of bundle file
output = commands.getstatusoutput( "ils -L "+irodshome+"/icmdtestbz2/icmdtestx/foo1 | tail -n1 | awk '{ print $NF }'")
print output[1]
bunfile = output[1]
assertiCmd(s.adminsession,"ils --bundle "+bunfile, "LIST", "Subfiles" )
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestbz2")
assertiCmd(s.adminsession,"irm -f --empty "+bunfile )
# cleanup
os.unlink( dir_w+"/testx1.tar" )
os.unlink( dir_w+"/testx.tar" )
shutil.rmtree( dir_w+"/testx1" )
shutil.rmtree( dir_w+"/testx" )
os.unlink( sfile2 )
if os.path.exists( myldir ):
shutil.rmtree( myldir )
if os.path.exists( mysdir ):
shutil.rmtree( mysdir )
def test_ireg_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# make a directory containing 20 small files
if not os.path.isdir(mysdir):
os.mkdir(mysdir)
for i in range(20):
mysfile = mysdir+"/sfile"+str(i)
shutil.copyfile( progname, mysfile )
commands.getstatusoutput( "mv "+sfile2+" /tmp/sfile2" )
commands.getstatusoutput( "cp /tmp/sfile2 /tmp/sfile2r" )
assertiCmd(s.adminsession,"ireg -KR "+self.testresc+" /tmp/sfile2 "+irodshome+"/foo5" ) # <-- FAILING - REASON FOR SKIPPING
commands.getstatusoutput( "cp /tmp/sfile2 /tmp/sfile2r" )
assertiCmd(s.adminsession,"ireg -KR "+self.anotherresc+" --repl /tmp/sfile2r "+irodshome+"/foo5" )
assertiCmd(s.adminsession,"iget -fK "+irodshome+"/foo5 "+dir_w+"/foo5" )
output = commands.getstatusoutput("diff /tmp/sfile2 "+dir_w+"/foo5")
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
assertiCmd(s.adminsession,"ireg -KCR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtesta" )
if os.path.exists(dir_w+"/testa"):
shutil.rmtree( dir_w+"/testa" )
assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtesta "+dir_w+"/testa", "LIST", "testa" )
output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testa" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
shutil.rmtree( dir_w+"/testa" )
# test ireg with normal user
testuser2home = "/"+irodszone+"/home/"+s.sessions[2].getUserName()
commands.getstatusoutput( "cp /tmp/sfile2 /tmp/sfile2c" )
assertiCmd(s.sessions[2],"ireg -KR "+self.testresc+" /tmp/sfile2c "+testuser2home+"/foo5", "ERROR", "PATH_REG_NOT_ALLOWED" )
assertiCmd(s.sessions[2],"iput -R "+self.testresc+" /tmp/sfile2c "+testuser2home+"/foo5" )
assertiCmd(s.sessions[2],"irm -f "+testuser2home+"/foo5" )
# cleanup
os.unlink( "/tmp/sfile2c" )
os.unlink( dir_w+"/foo5" )
if os.path.exists( myldir ):
shutil.rmtree( myldir )
if os.path.exists( mysdir ):
shutil.rmtree( mysdir )
def test_mcoll_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
# make a directory containing 20 small files
if not os.path.isdir(mysdir):
os.mkdir(mysdir)
for i in range(20):
mysfile = mysdir+"/sfile"+str(i)
shutil.copyfile( progname, mysfile )
assertiCmd(s.adminsession,"imkdir icmdtest")
# we put foo1 in $irodsdefresource and foo2 in testresource
assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
# prepare icmdtesta
assertiCmd(s.adminsession,"ireg -KCR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtesta" )
# mcoll test
assertiCmd(s.adminsession,"imcoll -m link "+irodshome+"/icmdtesta "+irodshome+"/icmdtestb" )
assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestb", "LIST", "icmdtestb" )
if os.path.exists(dir_w+"/testb"):
shutil.rmtree( dir_w+"/testb" )
assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtestb "+dir_w+"/testb", "LIST", "testb" )
output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testb" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestb" )
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestb" )
shutil.rmtree( dir_w+"/testb" )
assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestm" )
assertiCmd(s.adminsession,"imcoll -m filesystem -R "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtestm" )
assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestm/testmm" )
assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestm/testmm/foo1" )
assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestm/testmm/foo11" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm/foo1 "+irodshome+"/icmdtestm/testmm/foo2" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm "+irodshome+"/icmdtestm/testmm1" )
# mv to normal collection
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm1/foo2 "+irodshome+"/icmdtest/foo100" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo100", "LIST", "foo100" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm1 "+irodshome+"/icmdtest/testmm1" )
assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtest/testmm1", "LIST", "foo11" )
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtest/testmm1 "+irodshome+"/icmdtest/foo100" )
if os.path.exists(dir_w+"/testm"):
shutil.rmtree( dir_w+"/testm" )
assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtesta "+dir_w+"/testm", "LIST", "testm")
output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testm" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestm" )
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestm" )
shutil.rmtree( dir_w+"/testm" )
assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_mcol" )
assertiCmd(s.adminsession,"ibun -c "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtest" ) # added so icmdtestx.tar exists
assertiCmd(s.adminsession,"imcoll -m tar "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestt_mcol" )
assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestt_mcol", "LIST", ["foo2"] )
assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestt_mcol", "LIST", ["foo1"] )
if os.path.exists(dir_w+"/testt"):
shutil.rmtree( dir_w+"/testt" )
if os.path.exists(dir_w+"/testx"):
shutil.rmtree( dir_w+"/testx" )
assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtest "+dir_w+"/testx", "LIST", "testx" )
assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtestt_mcol/icmdtest "+dir_w+"/testt", "LIST", "testt" )
output = commands.getstatusoutput("diff -r "+dir_w+"/testx "+dir_w+"/testt" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_mcol/mydirtt" )
assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mt" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mt "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mtx" )
# unlink
assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestt_mcol" )
# cleanup
os.unlink( sfile2 )
shutil.rmtree( dir_w+"/testt" )
shutil.rmtree( dir_w+"/testx" )
if os.path.exists( mysdir ):
shutil.rmtree( mysdir )
def test_large_dir_and_mcoll_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# we put foo1 in $irodsdefresource and foo2 in testresource
assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
assertiCmd(s.adminsession,"ibun -c "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtest" ) # added so icmdtestx.tar exists
assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_large" )
assertiCmd(s.adminsession,"imcoll -m tar "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestt_large" )
assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_large/mydirtt" )
# make a directory of 2 large files and 2 small files
lfile = dir_w+"/lfile"
lfile1 = dir_w+"/lfile1"
commands.getstatusoutput( "echo 012345678901234567890123456789012345678901234567890123456789012 > "+lfile )
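        # note: each pass of the loop below concatenates nine copies of lfile, so the
        # 64-byte seed grows to roughly 64 B * 9**6, i.e. about 32 MiB of test data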
for i in range(6):
commands.getstatusoutput( "cat "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" > "+lfile1 )
os.rename ( lfile1, lfile )
os.mkdir( myldir )
for i in range(1,3):
mylfile = myldir+"/lfile"+str(i)
mysfile = myldir+"/sfile"+str(i)
if i != 2:
shutil.copyfile( lfile, mylfile )
else:
os.rename( lfile, mylfile )
shutil.copyfile( progname, mysfile )
# test adding a large file to a mounted collection
assertiCmd(s.adminsession,"iput "+myldir+"/lfile1 "+irodshome+"/icmdtestt_large/mydirtt" )
assertiCmd(s.adminsession,"iget "+irodshome+"/icmdtestt_large/mydirtt/lfile1 "+dir_w+"/testt" )
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestt_large/mydirtt" )
assertiCmd(s.adminsession,"imcoll -s "+irodshome+"/icmdtestt_large" )
assertiCmd(s.adminsession,"imcoll -p "+irodshome+"/icmdtestt_large" )
assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestt_large" )
assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestt_large" )
os.unlink( dir_w+"/testt" )
# cleanup
os.unlink( sfile2 )
if os.path.exists( myldir ):
shutil.rmtree( myldir )
def test_phybun_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# make a directory containing 20 small files
if not os.path.isdir(mysdir):
os.mkdir(mysdir)
for i in range(20):
mysfile = mysdir+"/sfile"+str(i)
shutil.copyfile( progname, mysfile )
# iphybun test
assertiCmd(s.adminsession,"iput -rR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtestp" )
assertiCmd(s.adminsession,"iphybun -KR "+self.anotherresc+" "+irodshome+"/icmdtestp" )
assertiCmd(s.adminsession,"itrim -rS "+self.testresc+" -N1 "+irodshome+"/icmdtestp", "LIST", "files trimmed" )
output = commands.getstatusoutput( "ils -L "+irodshome+"/icmdtestp/sfile1 | tail -n1 | awk '{ print $NF }'")
print output[1]
bunfile = output[1]
assertiCmd(s.adminsession,"irepl --purgec -R "+self.anotherresc+" "+bunfile )
assertiCmd(s.adminsession,"itrim -rS "+self.testresc+" -N1 "+irodshome+"/icmdtestp", "LIST", "files trimmed" )
# get the name of bundle file
assertiCmd(s.adminsession,"irm -f --empty "+bunfile )
# should not be able to remove it because it is not empty
assertiCmd(s.adminsession,"ils "+bunfile, "LIST", bunfile )
assertiCmd(s.adminsession,"irm -rvf "+irodshome+"/icmdtestp", "LIST", "num files done" )
assertiCmd(s.adminsession,"irm -f --empty "+bunfile )
if os.path.exists(dir_w+"/testp"):
shutil.rmtree( dir_w+"/testp" )
shutil.rmtree( mysdir )
# cleanup
os.unlink( sfile2 )
if os.path.exists( myldir ):
shutil.rmtree( myldir )
if os.path.exists( mysdir ):
shutil.rmtree( mysdir )
def test_irsync_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# testing irsync
assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest/foo100" )
assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 "+dir_w+"/foo100" )
assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 i:"+irodshome+"/icmdtest/foo200" )
assertiCmd(s.adminsession,"irm -f "+irodshome+"/icmdtest/foo100 "+irodshome+"/icmdtest/foo200")
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+progname+" "+irodshome+"/icmdtest/foo100")
assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest/foo100" )
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+progname+" "+irodshome+"/icmdtest/foo200")
assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 i:"+irodshome+"/icmdtest/foo200" )
os.unlink( dir_w+"/foo100" )
# cleanup
os.unlink( sfile2 )
def test_xml_protocol_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
lrsfile = dir_w+"/lrsfile"
rsfile = dir_w+"/rsfile"
# do test using xml protocol
os.environ['irodsProt'] = "1"
assertiCmd(s.adminsession,"ilsresc", "LIST", self.testresc )
assertiCmd(s.adminsession,"imiscsvrinfo", "LIST", "relVersion" )
assertiCmd(s.adminsession,"iuserinfo", "LIST", "name: "+username )
assertiCmd(s.adminsession,"ienv", "LIST", "Release Version" )
assertiCmd(s.adminsession,"icd "+irodshome )
assertiCmd(s.adminsession,"ipwd", "LIST", "home" )
assertiCmd(s.adminsession,"ihelp ils", "LIST", "ils" )
assertiCmd(s.adminsession,"ierror -14000", "LIST", "SYS_API_INPUT_ERR" )
assertiCmd(s.adminsession,"iexecmd hello", "LIST", "Hello world" )
assertiCmd(s.adminsession,"ips -v", "LIST", "ips" )
assertiCmd(s.adminsession,"iqstat", "LIST", "No delayed rules" )
assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtest1" )
# make a directory of large files
assertiCmd(s.adminsession,"iput -kf "+progname+" "+irodshome+"/icmdtest1/foo1" )
assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest1/foo1", "LIST", ["foo1", myssize] )
assertiCmd(s.adminsession,"iadmin ls "+irodshome+"/icmdtest1", "LIST", "foo1" )
assertiCmd(s.adminsession,"ichmod read "+s.sessions[1].getUserName()+" "+irodshome+"/icmdtest1/foo1" )
assertiCmd(s.adminsession,"ils -A "+irodshome+"/icmdtest1/foo1", "LIST", s.sessions[1].getUserName()+"#"+irodszone+":read" )
assertiCmd(s.adminsession,"irepl -B -R "+self.testresc+" "+irodshome+"/icmdtest1/foo1" )
# overwrite a copy
assertiCmd(s.adminsession,"itrim -S "+irodsdefresource+" -N1 "+irodshome+"/icmdtest1/foo1" )
assertiCmd(s.adminsession,"iphymv -R "+irodsdefresource+" "+irodshome+"/icmdtest1/foo1" )
assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest1/foo1 testmeta1 180 cm" )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "testmeta1" )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "180" )
assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "cm" )
assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest1/foo1 "+irodshome+"/icmdtest1/foo2" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest1/foo2 "+irodshome+"/icmdtest1/foo4" )
assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest1/foo4 "+irodshome+"/icmdtest1/foo2" )
assertiCmd(s.adminsession,"ichksum -K "+irodshome+"/icmdtest1/foo2", "LIST", "foo2" )
assertiCmd(s.adminsession,"iget -f -K "+irodshome+"/icmdtest1/foo2 "+dir_w )
os.unlink ( dir_w+"/foo2" )
assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest1/foo1" )
assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest1/foo1 /tmp/foo1" )
assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest1/foo1 i:"+irodshome+"/icmdtest1/foo2" )
os.unlink ( "/tmp/foo1" )
os.environ['irodsProt'] = "0"
# cleanup
os.unlink( sfile2 )
def test_large_files_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# make a directory of 2 large files and 2 small files
lfile = dir_w+"/lfile"
lfile1 = dir_w+"/lfile1"
commands.getstatusoutput( "echo 012345678901234567890123456789012345678901234567890123456789012 > "+lfile )
for i in range(6):
commands.getstatusoutput( "cat "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" > "+lfile1 )
os.rename ( lfile1, lfile )
os.mkdir( myldir )
for i in range(1,3):
mylfile = myldir+"/lfile"+str(i)
mysfile = myldir+"/sfile"+str(i)
if i != 2:
shutil.copyfile( lfile, mylfile )
else:
os.rename( lfile, mylfile )
shutil.copyfile( progname, mysfile )
# do the large files tests
lrsfile = dir_w+"/lrsfile"
rsfile = dir_w+"/rsfile"
if os.path.isfile( lrsfile ):
os.unlink( lrsfile )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
assertiCmd(s.adminsession,"iput -vbPKr --retries 10 --wlock -X "+rsfile+" --lfrestart "+lrsfile+" -N 2 "+myldir+" "+irodshome+"/icmdtest/testy", "LIST", "New restartFile" )
assertiCmd(s.adminsession,"ichksum -rK "+irodshome+"/icmdtest/testy", "LIST", "Total checksum performed" )
if os.path.isfile( lrsfile ):
os.unlink( lrsfile )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
assertiCmd(s.adminsession,"irepl -BvrPT -R "+self.testresc+" --rlock "+irodshome+"/icmdtest/testy", "LIST", "icmdtest/testy" )
assertiCmd(s.adminsession,"itrim -vrS "+irodsdefresource+" --dryrun --age 1 -N 1 "+irodshome+"/icmdtest/testy", "LIST", "This is a DRYRUN" )
assertiCmd(s.adminsession,"itrim -vrS "+irodsdefresource+" -N 1 "+irodshome+"/icmdtest/testy", "LIST", "a copy trimmed" )
assertiCmd(s.adminsession,"icp -vKPTr -N 2 "+irodshome+"/icmdtest/testy "+irodshome+"/icmdtest/testz", "LIST", "Processing lfile1" )
assertiCmd(s.adminsession,"irsync -r i:"+irodshome+"/icmdtest/testy i:"+irodshome+"/icmdtest/testz" )
assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testy" )
assertiCmd(s.adminsession,"iphymv -vrS "+irodsdefresource+" -R "+self.testresc+" "+irodshome+"/icmdtest/testz", "LIST", "icmdtest/testz" )
if os.path.isfile( lrsfile ):
os.unlink( lrsfile )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
if os.path.exists(dir_w+"/testz"):
shutil.rmtree( dir_w+"/testz" )
assertiCmd(s.adminsession,"iget -vPKr --retries 10 -X "+rsfile+" --lfrestart "+lrsfile+" --rlock -N 2 "+irodshome+"/icmdtest/testz "+dir_w+"/testz", "LIST", "testz" )
assertiCmd(s.adminsession,"irsync -r "+dir_w+"/testz i:"+irodshome+"/icmdtest/testz" )
assertiCmd(s.adminsession,"irsync -r i:"+irodshome+"/icmdtest/testz "+dir_w+"/testz" )
if os.path.isfile( lrsfile ):
os.unlink( lrsfile )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
output = commands.getstatusoutput( "diff -r "+dir_w+"/testz "+myldir )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
# test -N0 transfer
assertiCmd(s.adminsession,"iput -N0 -R "+self.testresc+" "+myldir+"/lfile1 "+irodshome+"/icmdtest/testz/lfoo100" )
if os.path.isfile( dir_w+"/lfoo100" ):
os.unlink( dir_w+"/lfoo100" )
assertiCmd(s.adminsession,"iget -N0 "+irodshome+"/icmdtest/testz/lfoo100 "+dir_w+"/lfoo100" )
output = commands.getstatusoutput( "diff "+myldir+"/lfile1 "+dir_w+"/lfoo100" )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
shutil.rmtree( dir_w+"/testz" )
os.unlink( dir_w+"/lfoo100" )
assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testz" )
# cleanup
os.unlink( sfile2 )
if os.path.exists( myldir ):
shutil.rmtree( myldir )
def test_large_files_with_RBUDP_from_devtest(self):
# build expected variables with similar devtest names
progname = __file__
myssize = str(os.stat(progname).st_size)
username = s.adminsession.getUserName()
irodszone = s.adminsession.getZoneName()
testuser1 = s.sessions[1].getUserName()
irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
irodsdefresource = s.adminsession.getDefResource()
dir_w = "."
sfile2 = dir_w+"/sfile2"
commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
mysdir = "/tmp/irodssdir"
myldir = dir_w+"/ldir"
if os.path.exists( myldir ):
shutil.rmtree( myldir )
assertiCmd(s.adminsession,"imkdir icmdtest")
# make a directory of 2 large files and 2 small files
lfile = dir_w+"/lfile"
lfile1 = dir_w+"/lfile1"
commands.getstatusoutput( "echo 012345678901234567890123456789012345678901234567890123456789012 > "+lfile )
for i in range(6):
commands.getstatusoutput( "cat "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" > "+lfile1 )
os.rename ( lfile1, lfile )
os.mkdir( myldir )
for i in range(1,3):
mylfile = myldir+"/lfile"+str(i)
mysfile = myldir+"/sfile"+str(i)
if i != 2:
shutil.copyfile( lfile, mylfile )
else:
os.rename( lfile, mylfile )
shutil.copyfile( progname, mysfile )
# do the large files tests using RBUDP
lrsfile = dir_w+"/lrsfile"
rsfile = dir_w+"/rsfile"
if os.path.isfile( lrsfile ):
os.unlink( lrsfile )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
assertiCmd(s.adminsession,"iput -vQPKr --retries 10 -X "+rsfile+" --lfrestart "+lrsfile+" "+myldir+" "+irodshome+"/icmdtest/testy", "LIST", "icmdtest/testy" )
assertiCmd(s.adminsession,"irepl -BQvrPT -R "+self.testresc+" "+irodshome+"/icmdtest/testy", "LIST", "icmdtest/testy" )
assertiCmd(s.adminsession,"itrim -vrS "+irodsdefresource+" -N 1 "+irodshome+"/icmdtest/testy", "LIST", "a copy trimmed" )
assertiCmd(s.adminsession,"icp -vQKPTr "+irodshome+"/icmdtest/testy "+irodshome+"/icmdtest/testz", "LIST", "Processing sfile1" )
assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testy" )
if os.path.isfile( lrsfile ):
os.unlink( lrsfile )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
if os.path.exists(dir_w+"/testz"):
shutil.rmtree( dir_w+"/testz" )
assertiCmd(s.adminsession,"iget -vQPKr --retries 10 -X "+rsfile+" --lfrestart "+lrsfile+" "+irodshome+"/icmdtest/testz "+dir_w+"/testz", "LIST", "Processing sfile2" )
if os.path.isfile( lrsfile ):
os.unlink( lrsfile )
if os.path.isfile( rsfile ):
os.unlink( rsfile )
output = commands.getstatusoutput( "diff -r "+dir_w+"/testz "+myldir )
print "output is ["+str(output)+"]"
assert output[0] == 0
assert output[1] == "", "diff output was not empty..."
shutil.rmtree( dir_w+"/testz" )
assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testz" )
shutil.rmtree( myldir )
# cleanup
os.unlink( sfile2 )
if os.path.exists( myldir ):
shutil.rmtree( myldir )
| 41,733
| 13
| 297
|
df366a12904a7d087fb699c3ddcbd77bc7584ac9
| 1,920
|
py
|
Python
|
app/models/trade_models.py
|
VaughnDV/instrument-trader
|
1d4d4b8a36ee834c7111768e9f0d8bc15d027893
|
[
"MIT"
] | null | null | null |
app/models/trade_models.py
|
VaughnDV/instrument-trader
|
1d4d4b8a36ee834c7111768e9f0d8bc15d027893
|
[
"MIT"
] | null | null | null |
app/models/trade_models.py
|
VaughnDV/instrument-trader
|
1d4d4b8a36ee834c7111768e9f0d8bc15d027893
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, ForeignKey, Integer, String, Enum, Float, DateTime, func
from sqlalchemy.orm import relationship
import enum
from app.database import Base
| 31.47541
| 87
| 0.717708
|
from sqlalchemy import Column, ForeignKey, Integer, String, Enum, Float, DateTime, func
from sqlalchemy.orm import relationship
import enum
from app.database import Base
class Trader(Base):
__tablename__ = "trader"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True, index=True)
trade = relationship("Trade", back_populates="trader")
class Instrument(Base):
__tablename__ = "instrument"
id = Column(String, primary_key=True, index=True)
name = Column(String, unique=True, index=True)
trade = relationship("Trade", back_populates="instrument")
class TradeBuySellEnum(enum.Enum):
BUY = "buy"
SELL = "sell"
class AssetClassEnum(enum.Enum):
BOND = "bond"
EQUITY = "equity"
FX = "fx"
class TradeDetail(Base):
__tablename__ = "trade_detail"
id = Column(Integer, primary_key=True, index=True)
buy_sell_indicator = Column(Enum(TradeBuySellEnum), nullable=False)
price = Column(Float(precision=2), nullable=False)
quantity = Column(Integer, nullable=False)
trade = relationship("Trade", uselist=False, back_populates="trade_detail")
class Trade(Base):
__tablename__ = "trade"
trade_id = Column(Integer, primary_key=True, index=True)
asset_class = Column(Enum(AssetClassEnum), nullable=False)
counterparty = Column(String, nullable=True)
trade_date_time = Column(DateTime, default=func.now())
# One to One with TradeDetail
trade_detail_id = Column(Integer, ForeignKey("trade_detail.id"))
trade_detail = relationship("TradeDetail", back_populates="trade")
# Many to One with Instrument
instrument_id = Column(Integer, ForeignKey("instrument.id"))
instrument = relationship("Instrument", back_populates="trade")
# Many to One with Trader
trader_id = Column(Integer, ForeignKey("trader.name"))
trader = relationship("Trader", back_populates="trade")
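# Illustrative usage sketch (an editor addition, not part of the original module):
# the relationship comments above describe a one-to-one TradeDetail link and
# many-to-one Instrument/Trader links, so wiring a Trade together in memory might
# look roughly like the block below; all literal values are hypothetical.
if __name__ == "__main__":
    detail = TradeDetail(
        buy_sell_indicator=TradeBuySellEnum.BUY, price=101.25, quantity=10
    )
    example_trade = Trade(
        asset_class=AssetClassEnum.EQUITY,
        counterparty="ACME",
        trade_detail=detail,
        instrument=Instrument(id="AAPL", name="Apple Inc."),
        trader=Trader(name="jdoe"),
    )
    print(example_trade.trade_detail.price, example_trade.instrument.name)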
| 0
| 1,606
| 138
|
15c2fef63b2d4ac40cf07d490bc31012681666a9
| 7,940
|
py
|
Python
|
APC400000/MelodicComponent.py
|
martinpechmann/APC400000
|
0783dd2f7c3846684f785b15e651c61edf95e27c
|
[
"BSD-Source-Code"
] | 6
|
2019-09-15T18:46:49.000Z
|
2021-09-10T06:36:10.000Z
|
APC400000/MelodicComponent.py
|
martinpechmann/APC400000
|
0783dd2f7c3846684f785b15e651c61edf95e27c
|
[
"BSD-Source-Code"
] | 3
|
2015-06-14T22:47:01.000Z
|
2015-06-17T14:24:47.000Z
|
APC400000/MelodicComponent.py
|
martinpechmann/APC400000
|
0783dd2f7c3846684f785b15e651c61edf95e27c
|
[
"BSD-Source-Code"
] | 1
|
2016-12-21T12:18:14.000Z
|
2016-12-21T12:18:14.000Z
|
# Embedded file name: c:\Jenkins\live\output\win_32_static\Release\midi-remote-scripts\Push\MelodicComponent.py
from __future__ import with_statement
from _Framework.Util import forward_property, find_if
from _Framework.SubjectSlot import subject_slot
from _Framework.ModesComponent import ModesComponent, LayerMode
from MessageBoxComponent import Messenger
from MatrixMaps import FEEDBACK_CHANNELS, NON_FEEDBACK_CHANNEL
from InstrumentComponent import InstrumentComponent
from NoteEditorComponent import NoteEditorComponent
from PlayheadComponent import PlayheadComponent
from MelodicPattern import pitch_index_to_string
from LoopSelectorComponent import LoopSelectorComponent
from NoteEditorPaginator import NoteEditorPaginator
NUM_NOTE_EDITORS = 7
| 44.111111
| 233
| 0.693955
|
# Embedded file name: c:\Jenkins\live\output\win_32_static\Release\midi-remote-scripts\Push\MelodicComponent.py
from __future__ import with_statement
from _Framework.Util import forward_property, find_if
from _Framework.SubjectSlot import subject_slot
from _Framework.ModesComponent import ModesComponent, LayerMode
from MessageBoxComponent import Messenger
from MatrixMaps import FEEDBACK_CHANNELS, NON_FEEDBACK_CHANNEL
from InstrumentComponent import InstrumentComponent
from NoteEditorComponent import NoteEditorComponent
from PlayheadComponent import PlayheadComponent
from MelodicPattern import pitch_index_to_string
from LoopSelectorComponent import LoopSelectorComponent
from NoteEditorPaginator import NoteEditorPaginator
NUM_NOTE_EDITORS = 7
class MelodicComponent(ModesComponent, Messenger):
def __init__(self, clip_creator = None, parameter_provider = None, grid_resolution = None, note_editor_settings = None, skin = None, instrument_play_layer = None, instrument_sequence_layer = None, layer = None, *a, **k):
super(MelodicComponent, self).__init__(*a, **k)
self._matrices = None
self._grid_resolution = grid_resolution
self._instrument = self.register_component(InstrumentComponent())
self._note_editors = self.register_components(*[ NoteEditorComponent(settings_mode=note_editor_settings, clip_creator=clip_creator, grid_resolution=self._grid_resolution, is_enabled=False) for _ in xrange(NUM_NOTE_EDITORS) ])
self._paginator = NoteEditorPaginator(self._note_editors)
self._loop_selector = self.register_component(LoopSelectorComponent(clip_creator=clip_creator, paginator=self._paginator, is_enabled=False))
self._playhead = None
self._playhead_component = self.register_component(PlayheadComponent(grid_resolution=grid_resolution, paginator=self._paginator, follower=self._loop_selector, is_enabled=False))
self.add_mode('play', LayerMode(self._instrument, instrument_play_layer))
self.add_mode('sequence', [LayerMode(self._instrument, instrument_sequence_layer),
self._loop_selector,
note_editor_settings,
LayerMode(self, layer),
self._playhead_component] + self._note_editors)
self.selected_mode = 'play'
scales = self._instrument.scales
self._on_detail_clip_changed.subject = self.song().view
self._on_scales_changed.subject = scales
self._on_scales_preset_changed.subject = scales._presets
self._on_notes_changed.subject = self._instrument
self._on_selected_mode_changed.subject = self
self._on_detail_clip_changed()
self._update_note_editors()
self._skin = skin
self._playhead_color = 'Melodic.Playhead'
self._update_playhead_color()
return
scales_menu = forward_property('_instrument')('scales_menu')
scales = forward_property('_instrument')('scales')
def set_playhead(self, playhead):
self._playhead = playhead
self._playhead_component.set_playhead(playhead)
self._update_playhead_color()
@forward_property('_loop_selector')
def set_loop_selector_matrix(self, matrix):
pass
@forward_property('_loop_selector')
def set_short_loop_selector_matrix(self, matrix):
pass
next_loop_page_button = forward_property('_loop_selector')('next_page_button')
prev_loop_page_button = forward_property('_loop_selector')('prev_page_button')
def set_note_editor_matrices(self, matrices):
if matrices and not len(matrices) <= NUM_NOTE_EDITORS: raise AssertionError
self._matrices = matrices
for editor, matrix in map(None, self._note_editors, matrices or []):
if editor:
editor.set_button_matrix(matrix)
self._update_matrix_channels_for_playhead()
return
def _get_playhead_color(self):
        return self._playhead_color
def _set_playhead_color(self, value):
self._playhead_color = 'Melodic.' + value
self._update_playhead_color()
playhead_color = property(_get_playhead_color, _set_playhead_color)
@subject_slot('detail_clip')
def _on_detail_clip_changed(self):
if self.is_enabled():
clip = self.song().view.detail_clip
clip = clip if self.is_enabled() and clip and clip.is_midi_clip else None
for note_editor in self._note_editors:
note_editor.set_detail_clip(clip)
self._loop_selector.set_detail_clip(clip)
self._playhead_component.set_clip(clip)
self._instrument.set_detail_clip(clip)
return
def _set_full_velocity(self, enable):
for note_editor in self._note_editors:
note_editor.full_velocity = enable
def _get_full_velocity(self):
        return self._note_editors[0].full_velocity
full_velocity = property(_get_full_velocity, _set_full_velocity)
def set_quantization_buttons(self, buttons):
self._grid_resolution.set_buttons(buttons)
def set_mute_button(self, button):
for e in self._note_editors:
e.set_mute_button(button)
@subject_slot('selected_mode')
def _on_selected_mode_changed(self, mode):
self._show_notes_information(mode)
@subject_slot('position')
def _on_notes_changed(self, *args):
self._update_note_editors()
self._show_notes_information()
@subject_slot('selected_mode')
def _on_scales_preset_changed(self, mode):
self._update_note_editors()
@subject_slot('scales_changed')
def _on_scales_changed(self):
self._update_note_editors()
def _update_note_editors(self, *a):
for row, note_editor in enumerate(self._note_editors):
note_info = self._instrument.pattern[row]
note_editor.background_color = 'NoteEditor.' + note_info.color
note_editor.editing_note = note_info.index
self._update_matrix_channels_for_playhead()
def _update_matrix_channels_for_playhead(self):
if self.is_enabled() and self._matrices != None:
pattern = self._instrument.pattern
for matrix, (y, _) in self._matrices.iterbuttons():
if matrix:
for x, button in enumerate(matrix):
if button:
if pattern[y].index != None:
button.set_identifier(x)
button.set_channel(FEEDBACK_CHANNELS[y])
else:
button.set_identifier(button._original_identifier)
button.set_channel(NON_FEEDBACK_CHANNEL)
return
def _update_playhead_color(self):
if self.is_enabled() and self._skin and self._playhead:
self._playhead.velocity = int(self._skin[self._playhead_color])
def update(self):
super(MelodicComponent, self).update()
self._on_detail_clip_changed()
self._update_playhead_color()
def _show_notes_information(self, mode = None):
if self.is_enabled():
if mode is None:
mode = self.selected_mode
if mode == 'sequence':
message = 'Sequence %s to %s'
first = find_if(lambda editor: editor.editing_note != None, self._note_editors)
last = find_if(lambda editor: editor.editing_note != None, reversed(self._note_editors))
start_note = first.editing_note if first != None else None
end_note = last.editing_note if last != None else None
else:
message = 'Play %s to %s'
start_note = self._instrument._pattern.note(0, 0).index
end_note = self._instrument._pattern.note(7, 7).index
self.show_notification(message % (pitch_index_to_string(start_note), pitch_index_to_string(end_note)))
return
| 5,891
| 1,276
| 23
|
1b385350022d77771f9fd2a0d89d1eb2b4a50830
| 2,917
|
py
|
Python
|
coral_deeplab/pretrained.py
|
xadrianzetx/coral-deeplab
|
aa685a9b694339685d3fc7510296ecbe513838bb
|
[
"MIT"
] | 5
|
2021-03-15T10:26:14.000Z
|
2022-03-03T14:33:07.000Z
|
coral_deeplab/pretrained.py
|
xadrianzetx/coral-deeplab
|
aa685a9b694339685d3fc7510296ecbe513838bb
|
[
"MIT"
] | 3
|
2021-06-28T22:07:29.000Z
|
2021-10-16T17:48:31.000Z
|
coral_deeplab/pretrained.py
|
xadrianzetx/coral-deeplab
|
aa685a9b694339685d3fc7510296ecbe513838bb
|
[
"MIT"
] | 2
|
2021-06-29T08:06:02.000Z
|
2021-09-30T08:15:08.000Z
|
# MIT License
# Copyright (c) 2021 xadrianzetx
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import Enum
| 39.418919
| 80
| 0.731231
|
# MIT License
# Copyright (c) 2021 xadrianzetx
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import Enum
class MLModel(Enum):
pass
class KerasModel(MLModel):
DEEPLAB_V3_DM1 = {
'origin': '1CE7cMfgViNgFxXKbCq0wFXeO8slV0Z01',
'filename': 'deeplabv3_mnv2_dm1_voc_tainaug_os16.h5',
'checksum': 'b326724d7e89d8cc7f409edbf1b11105'
}
DEEPLAB_V3_DM05 = {
'origin': '1J-8hCUNYxbWgazflv8CGVYgmqoFHxB_N',
'filename': 'deeplabv3_mnv2_dm05_voc_tainaug_os16.h5',
'checksum': '36e1e957a62848451db92c53abc1d7d7'
}
DEEPLAB_V3_PLUS_DM1 = {
'origin': '191I3qg-S245BD8aX1jfGF2Yy3H9-1A1l',
'filename': 'deeplabv3plus_mnv2_dm1_voc_trainaug_os4.h5',
'checksum': 'c43f0acf3a256daa237da66ecedb4565'
}
DEEPLAB_V3_PLUS_DM05 = {
'origin': '17wv_wRPZMnj2s_y_nol8whkwda0C2O77',
'filename': 'deeplabv3plus_mnv2_dm05_voc_trainaug_os4.h5',
'checksum': 'e3e002c39716bc54f966bae657fc2f78'
}
class EdgeTPUModel(MLModel):
DEEPLAB_V3_DM1 = {
'origin': '1YmaaQ9qOxlMfB9eAI7roOqgeo4y7Mosg',
'filename': 'deeplabv3_mnv2_dm1_voc_tainaug_os16_edgetpu.tflite',
'checksum': '6c0ade5b647dc137f6231a9724cf65e6'
}
DEEPLAB_V3_DM05 = {
'origin': '1bukSOJf8JL_RSQwrCIypvzxxamEhO9cV',
'filename': 'deeplabv3_mnv2_dm05_voc_tainaug_os16_edgetpu.tflite',
'checksum': '2d3ad50d08c12dba4d5ea61f59bb0b79'
}
DEEPLAB_V3_PLUS_DM1 = {
'origin': '1-2U13RHX5b-h7rIfhxovpxeC4c6DNA8r',
'filename': 'deeplabv3plus_mnv2_dm1_voc_trainaug_os4_edgetpu.tflite',
'checksum': '3ad64d967a3e526d7df4a3b3a8a60f8a'
}
DEEPLAB_V3_PLUS_DM05 = {
'origin': '1DJ11luO0SMU69egtPShP-4-rSVYki-HP',
'filename': 'deeplabv3plus_mnv2_dm05_voc_trainaug_os4_edgetpu.tflite',
'checksum': 'abab0449b81be44efcfab4cacccc7f1b'
}
| 0
| 1,720
| 69
|
73296fabb1841f8ea0d871204567b5801050b15c
| 1,619
|
py
|
Python
|
metalprot/database/database_download.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
metalprot/database/database_download.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
metalprot/database/database_download.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
import os
import prody as pr
# Manipulate rcsb file
def organize_rcsb_file(workdir = "/mnt/e/DesignData/ligands/NI_rcsb/"):
'''
The .csv files downloaded from rcsb database will be combined first,
then generate tab deliminated txt file.
'''
all_lines = []
for file in os.listdir(workdir):
if file.endswith(".csv"):
with open(workdir + file, 'r') as f:
all_lines.extend(f.readlines())
with open(workdir + 'all_rcsb.txt', 'w') as f:
f.write('\t'.join(all_lines[0].split(',')))
for r in all_lines:
if 'Entry ID' not in r and r.split(',')[0]!= '':
f.write('\t'.join(r.split(',')))
# download rcsb pdb files
| 30.54717
| 91
| 0.551575
|
import os
import prody as pr
# Manipulate rcsb file
def organize_rcsb_file(workdir = "/mnt/e/DesignData/ligands/NI_rcsb/"):
'''
    The .csv files downloaded from the rcsb database will be combined first,
    then a tab-delimited txt file is generated.
'''
all_lines = []
for file in os.listdir(workdir):
if file.endswith(".csv"):
with open(workdir + file, 'r') as f:
all_lines.extend(f.readlines())
with open(workdir + 'all_rcsb.txt', 'w') as f:
f.write('\t'.join(all_lines[0].split(',')))
for r in all_lines:
if 'Entry ID' not in r and r.split(',')[0]!= '':
f.write('\t'.join(r.split(',')))
# download rcsb pdb files
def download_pdb(workdir, filename, resolution = 2.5):
if not os.path.exists(workdir):
os.mkdir(workdir)
all_pdbs = []
with open(workdir + filename, 'r') as f:
for line in f.readlines():
#print(line)
r = line.split('\t')
#print(r)
if r[0] == '"Entry ID"': continue
            if r[0] == '' or r[4] == '' or (',' in r[4]) or float(r[4].split('"')[1]) > resolution:
continue
all_pdbs.append(r[0].split('"')[1])
exist_pdb = set()
for file in os.listdir(workdir):
if file.endswith(".pdb.gz"):
exist_pdb.add(file.split('.')[0].upper())
pr.pathPDBFolder(workdir)
for p in all_pdbs:
if p in exist_pdb: continue
pr.fetchPDBviaFTP(p, compressed = False)
# #Then unzip them in linux with:
# cd /mnt/e/DesignData/ligands/NI_rcsb/
# gunzip *.gz
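# A minimal driver sketch (an editor addition; the workdir path is the same
# hypothetical location used in the defaults above): combine the rcsb csv
# exports into all_rcsb.txt, then fetch the matching structures via prody.
if __name__ == "__main__":
    workdir = "/mnt/e/DesignData/ligands/NI_rcsb/"
    organize_rcsb_file(workdir)
    download_pdb(workdir, "all_rcsb.txt", resolution=2.5)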
| 879
| 0
| 23
|
0ce7701a7823d8739f05f92a5128f0b1ff249404
| 2,146
|
py
|
Python
|
config.py
|
jackgibson2/lambda-cleaner
|
433fbf7b7393a6d49771346e5b48939428ed663a
|
[
"MIT"
] | null | null | null |
config.py
|
jackgibson2/lambda-cleaner
|
433fbf7b7393a6d49771346e5b48939428ed663a
|
[
"MIT"
] | null | null | null |
config.py
|
jackgibson2/lambda-cleaner
|
433fbf7b7393a6d49771346e5b48939428ed663a
|
[
"MIT"
] | null | null | null |
import os
import json
import boto3
REGION = 'us-east-2'
session = boto3.session.Session(profile_name='sandbox')
#iam = boto3.resource('iam', region_name=REGION)
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:DeleteVolume",
"Resource": "arn:aws:ec2:us-east-2:xxxxx:volume/*"
},
{
"Effect": "Allow",
"Action": "ec2:DeleteSnapshot",
"Resource": "arn:aws:ec2:us-east-2:xxxxx:snapshot/*"
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"autoscaling:SetDesiredCapacity",
"ssm:DescribeParameters",
"autoscaling:DescribeAutoScalingGroups",
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ssm:GetParameters",
"Resource": "arn:aws:ssm:us-east-2:xxxxx:parameter/mysandbox/*"
},
{
"Effect": "Allow",
"Action": "ssm:PutParameter",
"Resource": "arn:aws:ssm:us-east-2:xxxxx:parameter/mysandbox/*"
},
{
"Effect": "Allow",
"Action": [
"ec2:TerminateInstances",
"ec2:StopInstances"
],
"Resource": "arn:aws:ec2:us-east-2:xxxxx:instance/*"
}
]
}
parameterRoot = '/AccountCleaner/'
retentionDays = 7
ssm = session.client('ssm', region_name=REGION)
ssm.put_parameter(
Name=parameterRoot + 'retentionDays',
    Description='Days to retain snapshots',
Value=str(retentionDays),
Type='String',
Overwrite=True)
ssm.put_parameter(
Name=parameterRoot + 'Enalbed',
Description='Flag to turn off cleaner lambdas globally',
Value='True',
Type='String',
Overwrite=True)
ssm.put_parameter(
Name=parameterRoot + 'DryRun',
Description='Flag to turn dry run on for cleaner lambdas globally',
Value='False',
Type='String',
Overwrite=True)
| 26.825
| 75
| 0.536813
|
import os
import json
import boto3
REGION = 'us-east-2'
session = boto3.session.Session(profile_name='sandbox')
#iam = boto3.resource('iam', region_name=REGION)
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:DeleteVolume",
"Resource": "arn:aws:ec2:us-east-2:xxxxx:volume/*"
},
{
"Effect": "Allow",
"Action": "ec2:DeleteSnapshot",
"Resource": "arn:aws:ec2:us-east-2:xxxxx:snapshot/*"
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"autoscaling:SetDesiredCapacity",
"ssm:DescribeParameters",
"autoscaling:DescribeAutoScalingGroups",
"ec2:DescribeVolumes",
"ec2:DescribeSnapshots"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ssm:GetParameters",
"Resource": "arn:aws:ssm:us-east-2:xxxxx:parameter/mysandbox/*"
},
{
"Effect": "Allow",
"Action": "ssm:PutParameter",
"Resource": "arn:aws:ssm:us-east-2:xxxxx:parameter/mysandbox/*"
},
{
"Effect": "Allow",
"Action": [
"ec2:TerminateInstances",
"ec2:StopInstances"
],
"Resource": "arn:aws:ec2:us-east-2:xxxxx:instance/*"
}
]
}
parameterRoot = '/AccountCleaner/'
retentionDays = 7
ssm = session.client('ssm', region_name=REGION)
ssm.put_parameter(
Name=parameterRoot + 'retentionDays',
    Description='Days to retain snapshots',
Value=str(retentionDays),
Type='String',
Overwrite=True)
ssm.put_parameter(
Name=parameterRoot + 'Enalbed',
Description='Flag to turn off cleaner lambdas globally',
Value='True',
Type='String',
Overwrite=True)
ssm.put_parameter(
Name=parameterRoot + 'DryRun',
Description='Flag to turn dry run on for cleaner lambdas globally',
Value='False',
Type='String',
Overwrite=True)
| 0
| 0
| 0
|
35ad11a063628120bac86d92a7d8d61ab2e3bf24
| 11,098
|
py
|
Python
|
python_modules/dagster-graphql/dagster_graphql/dauphin_registry.py
|
ericct/dagster
|
dd2c9f05751e1bae212a30dbc54381167a14f6c5
|
[
"Apache-2.0"
] | 1
|
2021-04-30T00:19:20.000Z
|
2021-04-30T00:19:20.000Z
|
python_modules/dagster-graphql/dagster_graphql/dauphin_registry.py
|
ericct/dagster
|
dd2c9f05751e1bae212a30dbc54381167a14f6c5
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql/dauphin_registry.py
|
ericct/dagster
|
dd2c9f05751e1bae212a30dbc54381167a14f6c5
|
[
"Apache-2.0"
] | null | null | null |
"""
Dauphin is a wrapper module around graphene meant to provide a couple of additional
features. Most important is a type registry. Instead of referring to
the class that corresponds to the GraphQL type everywhere, you are instead
allowed to use the GraphQL string. This solves an immediate short-term problem
in that it is quite irritating to manage dependencies in a graphql schema
where the types refer to each other in a cyclic fashion. Breaking up a schema
into multiple files without this feature (Python has no notion of forward
declarations) is difficult.
Dauphin is meant to totally wrap graphene. That means that if you are viewing a code
sample online or within the graphene docs, one should be able to use
dauphin.ChooseYourClass instead of graphene.ChooseYourClass.
We also use dauphin as a disintermediation layer between our application code and
graphene in places where we want additional strictness or more convenient idioms.
e.g.
dauphin.non_null_list(dauphin.String)
as opposed to
graphene.NonNull(graphene.List(graphene.NonNull(graphene.String)))
"""
from functools import partial
import graphene
from graphene.types.definitions import GrapheneGraphQLType, GrapheneObjectType, GrapheneUnionType
from graphene.types.enum import EnumMeta
from graphene.types.generic import GenericScalar
from graphene.types.typemap import TypeMap as GrapheneTypeMap
from graphene.types.typemap import resolve_type
from graphene.utils.subclass_with_meta import SubclassWithMeta_Meta
from graphql.type.introspection import IntrospectionSchema
GRAPHENE_TYPES = [
graphene.ObjectType,
graphene.InputObjectType,
graphene.Interface,
graphene.Scalar,
]
GRAPHENE_BUILT_IN = [
graphene.String,
graphene.Int,
graphene.Float,
graphene.Boolean,
graphene.ID,
GenericScalar,
]
# we change map to map_ in construct_union override because of collision with built-in
# pylint: disable=W0221
| 35.915858
| 100
| 0.67769
|
"""
Dauphin is a wrapper module around graphene meant to provide a couple of additional
features. Most important is a type registry. Instead of referring to
the class that corresponds to the GraphQL type everywhere, you are instead
allowed to use the GraphQL string. This solves an immediate short-term problem
in that it is quite irritating to manage dependencies in a graphql schema
where the types refer to each other in a cyclic fashion. Breaking up a schema
into multiple files without this feature (Python has no notion of forward
declarations) is difficult.
Dauphin is meant to totally wrap graphene. That means that if you are viewing a code
sample online or within the graphene docs, one should be able to use
dauphin.ChooseYourClass instead of graphene.ChooseYourClass.
We also use dauphin as a disintermediation layer between our application code and
graphene in places where we want additional strictness or more convenient idioms.
e.g.
dauphin.non_null_list(dauphin.String)
as opposed to
graphene.NonNull(graphene.List(graphene.NonNull(graphene.String)))
"""
from functools import partial
import graphene
from graphene.types.definitions import GrapheneGraphQLType, GrapheneObjectType, GrapheneUnionType
from graphene.types.enum import EnumMeta
from graphene.types.generic import GenericScalar
from graphene.types.typemap import TypeMap as GrapheneTypeMap
from graphene.types.typemap import resolve_type
from graphene.utils.subclass_with_meta import SubclassWithMeta_Meta
from graphql.type.introspection import IntrospectionSchema
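# Illustrative sketch (an editor addition; 'Pipeline' is a hypothetical type name):
# with a DauphinRegistry, fields can reference other types by their GraphQL string
# name instead of importing the Python class, e.g.
#
#     registry = DauphinRegistry()
#
#     class Query(registry.ObjectType):
#         pipeline = registry.Field('Pipeline')  # resolved lazily by name
#         names = registry.non_null_list(registry.String)
#
#     schema = registry.create_schema()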
GRAPHENE_TYPES = [
graphene.ObjectType,
graphene.InputObjectType,
graphene.Interface,
graphene.Scalar,
]
GRAPHENE_BUILT_IN = [
graphene.String,
graphene.Int,
graphene.Float,
graphene.Boolean,
graphene.ID,
GenericScalar,
]
# we change map to map_ in construct_union override because of collision with built-in
# pylint: disable=W0221
def get_meta(graphene_type):
return graphene_type._meta # pylint: disable=W0212
class DauphinRegistry:
def __init__(self):
self._typeMap = {}
self.Field = create_registry_field(self)
self.Argument = create_registry_argument(self)
self.List = create_registry_list(self)
self.NonNull = create_registry_nonnull(self)
registering_metaclass = create_registering_metaclass(self)
self.Union = create_union(registering_metaclass, self)
self.Enum = create_enum(registering_metaclass)
self.Mutation = graphene.Mutation
# Not looping over GRAPHENE_TYPES in order to not fool lint
self.ObjectType = create_registering_class(graphene.ObjectType, registering_metaclass)
self.InputObjectType = create_registering_class(
graphene.InputObjectType, registering_metaclass
)
self.Interface = create_registering_class(graphene.Interface, registering_metaclass)
self.Scalar = create_registering_class(graphene.Scalar, registering_metaclass)
# Not looping over GRAPHENE_BUILTINS in order to not fool lint
self.String = graphene.String
self.addType(graphene.String)
self.Int = graphene.Int
self.addType(graphene.Int)
self.Float = graphene.Float
self.addType(graphene.Float)
self.Boolean = graphene.Boolean
self.addType(graphene.Boolean)
self.ID = graphene.ID
self.addType(graphene.ID)
self.GenericScalar = GenericScalar
self.addType(GenericScalar)
def create_schema(self):
return DauphinSchema(
query=self.getType("Query"),
mutation=self.getTypeOrNull("Mutation"),
subscription=self.getTypeOrNull("Subscription"),
types=self.getAllImplementationTypes(),
registry=self,
)
def getTypeOrNull(self, typeName):
return self._typeMap.get(typeName)
def getType(self, typeName):
graphene_type = self.getTypeOrNull(typeName)
if not graphene_type:
raise Exception("No such type {typeName}.".format(typeName=typeName))
return graphene_type
def getAllTypes(self):
return self._typeMap.values()
def getAllImplementationTypes(self):
return [t for t in self._typeMap.values() if issubclass(t, self.ObjectType)]
def addType(self, graphene_type):
meta = get_meta(graphene_type)
if meta:
if not graphene_type in self._typeMap:
self._typeMap[meta.name] = graphene_type
else:
raise Exception(
"Type {typeName} already exists in the registry.".format(typeName=meta.name)
)
else:
raise Exception("Cannot add unnamed type or a non-type to registry.")
def non_null_list(self, of_type):
return self.NonNull(self.List(self.NonNull(of_type)))
class DauphinSchema(graphene.Schema):
def __init__(self, registry, **kwargs):
self._typeRegistry = registry
super(DauphinSchema, self).__init__(**kwargs)
def build_typemap(self):
initial_types = [self._query, self._mutation, self._subscription, IntrospectionSchema]
if self.types:
initial_types += self.types
self._type_map = DauphinTypeMap(
initial_types,
auto_camelcase=self.auto_camelcase,
schema=self,
typeRegistry=self._typeRegistry,
)
def type_named(self, name):
return getattr(self, name)
class DauphinTypeMap(GrapheneTypeMap):
def __init__(self, types, typeRegistry=None, **kwargs):
self._typeRegistry = typeRegistry
super(DauphinTypeMap, self).__init__(types, **kwargs)
def construct_object_type(self, map_, graphene_type):
type_meta = get_meta(graphene_type)
if type_meta.name in map_:
mapped_type = map_[get_meta(graphene_type).name]
if isinstance(mapped_type, GrapheneGraphQLType):
assert mapped_type.graphene_type == graphene_type, (
"Found different types with the same name in the schema: {}, {}."
).format(mapped_type.graphene_type, graphene_type)
return mapped_type
# TODO the codepath below appears to be untested
def interfaces():
interfaces = []
for interface in type_meta.interfaces:
if isinstance(interface, str):
interface = self._typeRegistry.getType(interface)
self.graphene_reducer(map_, interface)
internal_type = map_[get_meta(interface).name]
assert internal_type.graphene_type == interface
interfaces.append(internal_type)
return interfaces
if type_meta.possible_types:
# FIXME: is_type_of_from_possible_types does not exist
# is_type_of = partial(is_type_of_from_possible_types, type_meta.possible_types)
raise Exception("Not sure what is going on here. Untested codepath")
else:
            is_type_of = graphene_type.is_type_of
        return GrapheneObjectType(
            graphene_type=graphene_type,
            name=type_meta.name,
            description=type_meta.description,
            fields=partial(self.construct_fields_for_type, map_, graphene_type),
is_type_of=is_type_of,
interfaces=interfaces,
)
def construct_union(self, map_, graphene_type):
union_resolve_type = None
type_meta = get_meta(graphene_type)
if graphene_type.resolve_type:
union_resolve_type = partial(
resolve_type, graphene_type.resolve_type, map_, type_meta.name
)
def types():
union_types = []
for objecttype in type_meta.types:
if isinstance(objecttype, str):
objecttype = self._typeRegistry.getType(objecttype)
self.graphene_reducer(map_, objecttype)
internal_type = map_[get_meta(objecttype).name]
assert internal_type.graphene_type == objecttype
union_types.append(internal_type)
return union_types
return GrapheneUnionType(
graphene_type=graphene_type,
name=type_meta.name,
description=type_meta.description,
types=types,
resolve_type=union_resolve_type,
)
def create_registering_metaclass(registry):
class RegisteringMetaclass(SubclassWithMeta_Meta):
def __init__(cls, name, bases, namespaces):
super(RegisteringMetaclass, cls).__init__( # pylint: disable=no-value-for-parameter
name, bases, namespaces
)
if any(base for base in bases if getattr(base, "__dauphinCoreType", False)):
registry.addType(cls)
return RegisteringMetaclass
def create_registering_class(cls, metaclass):
new_cls = metaclass(cls.__name__, (cls,), {})
setattr(new_cls, "__dauphinCoreType", True)
return new_cls
def create_union(metaclass, _registry):
meta_class = type("Meta", (object,), {"types": ("__", "__")})
Union = metaclass("Union", (graphene.Union,), {"Meta": meta_class})
setattr(Union, "__dauphinCoreType", True)
return Union
def create_enum(metaclass):
class EnumRegisteringMetaclass(metaclass, EnumMeta):
pass
def from_enum(cls, enum, description=None, deprecation_reason=None):
description = description or enum.__doc__
meta_dict = {
"enum": enum,
"description": description,
"deprecation_reason": deprecation_reason,
}
meta_class = type("Meta", (object,), meta_dict)
return type(meta_class.enum.__name__, (cls,), {"Meta": meta_class})
Enum = EnumRegisteringMetaclass("Enum", (graphene.Enum,), {"from_enum": classmethod(from_enum)})
setattr(Enum, "__dauphinCoreType", True)
return Enum
def get_type_fn(registry, dauphin_type):
if isinstance(dauphin_type, str):
return lambda: registry.getType(dauphin_type)
else:
return dauphin_type
def create_registry_field(registry):
class Field(graphene.Field):
def __init__(self, dauphin_type, *args, **kwargs):
super(Field, self).__init__(get_type_fn(registry, dauphin_type), *args, **kwargs)
return Field
def create_registry_argument(registry):
class Argument(graphene.Argument):
def __init__(self, dauphin_type, *args, **kwargs):
super(Argument, self).__init__(get_type_fn(registry, dauphin_type), *args, **kwargs)
return Argument
def create_registry_list(registry):
class List(graphene.List):
def __init__(self, of_type, *args, **kwargs):
super(List, self).__init__(get_type_fn(registry, of_type), *args, **kwargs)
return List
def create_registry_nonnull(registry):
class NonNull(graphene.NonNull):
def __init__(self, of_type, *args, **kwargs):
super(NonNull, self).__init__(get_type_fn(registry, of_type), *args, **kwargs)
return NonNull
| 8,463
| 34
| 674
|
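The registry helpers in the file above (create_registering_metaclass, create_registering_class, create_registry_field) only assume an object that exposes addType/getType. Below is a minimal sketch of how they could be wired together, assuming graphene 2.x (where ObjectType's metaclass is SubclassWithMeta_Meta) and that the helpers are importable from this module; SketchTypeRegistry is an illustrative stand-in keyed by class name, not the real registry, which keys types by their graphene Meta name.

import graphene

class SketchTypeRegistry:
    """Illustrative stand-in for the real type registry."""
    def __init__(self):
        self._types = {}
    def addType(self, cls):
        # the real registry keys off the graphene Meta name rather than __name__
        self._types[cls.__name__] = cls
    def getType(self, name):
        return self._types[name]

registry = SketchTypeRegistry()
RegisteringMeta = create_registering_metaclass(registry)
ObjectType = create_registering_class(graphene.ObjectType, RegisteringMeta)
Field = create_registry_field(registry)

class Query(ObjectType):
    # Defining Query registers it via the metaclass, so Field('Query')
    # can later resolve the type lazily by name.
    hello = graphene.String()

assert registry.getType('Query') is Query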
434f9a60d0158ff1d591eee55a9eecd875a4fe07
| 155
|
py
|
Python
|
10/00/7.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
10/00/7.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | 39
|
2017-07-31T22:54:01.000Z
|
2017-08-31T00:19:03.000Z
|
10/00/7.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
#!python3.6
print("int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42))
print("int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42))
| 38.75
| 72
| 0.477419
|
#!python3.6
print("int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42))
print("int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42))
| 0
| 0
| 0
|
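For reference, the two print calls in 7.py above produce the output below; the '#' alternate-form flag only adds the 0x/0o/0b prefixes to the hex, octal and binary renderings of 42.

# int: 42; hex: 2a; oct: 52; bin: 101010
# int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010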
aa2b998413ca4e625b5b5ade9809014351a9b998
| 1,592
|
py
|
Python
|
server.py
|
nagasudhirpulla/grafana_smscountry_webhook
|
d8ab182b8f5ec12f00eaf966ec15a234782154e0
|
[
"MIT"
] | null | null | null |
server.py
|
nagasudhirpulla/grafana_smscountry_webhook
|
d8ab182b8f5ec12f00eaf966ec15a234782154e0
|
[
"MIT"
] | null | null | null |
server.py
|
nagasudhirpulla/grafana_smscountry_webhook
|
d8ab182b8f5ec12f00eaf966ec15a234782154e0
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
from waitress import serve
from src.config.appConfig import loadAppConfig
from src.logs.loggerFactory import getFileLogger
from src.services.smsSender import SmsApi
# get application config
appConf = loadAppConfig()
# setup logging based on application config
backUpCount = appConf["backUpCount"]
fileRollingHrs = appConf["fileRollingHrs"]
logFilePath = appConf["logFilePath"]
logger = getFileLogger(
"app_logger", logFilePath, backUpCount, fileRollingHrs)
# create webhook server
app = Flask(__name__)
app.secret_key = appConf['flaskSecret']
app.logger = logger
# initialize sms api sender with required parameters from application config
smsApi = SmsApi(appConf["smsUsername"], appConf["smsPassword"],
appConf["persons"], appConf["groups"])
@app.route('/')
def index():
    # end point for testing the webhook
    return 'Hello, World!'
@app.route('/api/send-sms/<grpName>', methods=['POST'])
def sendSms(grpName: str):
    # api end point to send sms
    msgJson = request.json
    alertMsg = msgJson["message"]
    alertState = msgJson["state"].capitalize()
    smsStr = "[{0}] {1}".format(alertState, alertMsg)
    isSuccess = smsApi.sendSmsToGroup(grpName, smsStr)
    # logger.log(smsStr)
    return isSuccess
if __name__ == '__main__':
serverMode: str = appConf['mode']
if serverMode.lower() == 'd':
app.run(host=appConf["flaskHost"], port=int(
appConf["flaskPort"]), debug=True)
else:
serve(app, host=appConf["flaskHost"], port=int(
appConf["flaskPort"]), threads=1)
| 29.481481
| 76
| 0.702261
|
from flask import Flask, request
from waitress import serve
from src.config.appConfig import loadAppConfig
from src.logs.loggerFactory import getFileLogger
from src.services.smsSender import SmsApi
# get application config
appConf = loadAppConfig()
# setup logging based on application config
backUpCount = appConf["backUpCount"]
fileRollingHrs = appConf["fileRollingHrs"]
logFilePath = appConf["logFilePath"]
logger = getFileLogger(
"app_logger", logFilePath, backUpCount, fileRollingHrs)
# create webhook server
app = Flask(__name__)
app.secret_key = appConf['flaskSecret']
app.logger = logger
# initialize sms api sender with required parameters from application config
smsApi = SmsApi(appConf["smsUsername"], appConf["smsPassword"],
appConf["persons"], appConf["groups"])
@app.route('/')
def index():
# end point for testing the webhook
return 'Hello, World!'
@app.route('/api/send-sms/<grpName>', methods=['POST'])
def sendSms(grpName: str):
# api end point to send sms
msgJson = request.json
alertMsg = msgJson["message"]
alertState = msgJson["state"].capitalize()
smsStr = "[{0}] {1}".format(alertState, alertMsg)
isSuccess = smsApi.sendSmsToGroup(grpName, smsStr)
# logger.log(smsStr)
return isSuccess
if __name__ == '__main__':
serverMode: str = appConf['mode']
if serverMode.lower() == 'd':
app.run(host=appConf["flaskHost"], port=int(
appConf["flaskPort"]), debug=True)
else:
serve(app, host=appConf["flaskHost"], port=int(
appConf["flaskPort"]), threads=1)
| 358
| 0
| 44
|
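The send-sms endpoint above only reads the 'message' and 'state' fields of the posted JSON. A minimal sketch of exercising it is shown below; the host, port and group name ('ops') are assumptions standing in for values from the application config, not part of the original code.

import requests

payload = {
    "message": "High CPU usage on server-1",  # becomes the SMS body
    "state": "alerting",                      # capitalized into the "[Alerting]" prefix
}
# hypothetical host/port; the real values come from appConf["flaskHost"] / appConf["flaskPort"]
resp = requests.post("http://localhost:5000/api/send-sms/ops", json=payload)
print(resp.status_code)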
2b3d9561450c764cc9dff63f42c7c64befb8dd30
| 1,213
|
py
|
Python
|
gans/CGAN/loader.py
|
IvLabs/Variational-DL
|
cd431564ae77ba42a485db17416a6033b32c48fb
|
[
"MIT"
] | 37
|
2020-12-24T10:03:16.000Z
|
2022-01-18T05:37:07.000Z
|
gans/CGAN/loader.py
|
vignesh-creator/Variational-DL
|
cd431564ae77ba42a485db17416a6033b32c48fb
|
[
"MIT"
] | 1
|
2021-10-03T20:04:36.000Z
|
2021-10-04T17:21:51.000Z
|
gans/CGAN/loader.py
|
vignesh-creator/Variational-DL
|
cd431564ae77ba42a485db17416a6033b32c48fb
|
[
"MIT"
] | 36
|
2020-12-27T16:38:27.000Z
|
2022-03-21T17:20:22.000Z
|
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
training_data = datasets.CIFAR10(root="data", train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))
]))
validation_data = datasets.CIFAR10(root="data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))
]))
#Hyper parameters
batch_size = 128
d_lr = 2e-4 #learning rate of discriminator
g_lr = 2e-4 #learning rate of generator
epochs = 20
train_shape = training_data.data.shape[0]
training_loader = DataLoader(training_data,batch_size=batch_size, shuffle=True,pin_memory=True)
validation_loader = DataLoader(validation_data,batch_size=16,shuffle=True,pin_memory=True)
| 44.925926
| 96
| 0.575433
|
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
training_data = datasets.CIFAR10(root="data", train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))
]))
validation_data = datasets.CIFAR10(root="data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))
]))
#Hyper parameters
batch_size = 128
d_lr = 2e-4 #learning rate of discriminator
g_lr = 2e-4 #learning rate of generator
epochs = 20
train_shape = training_data.data.shape[0]
training_loader = DataLoader(training_data,batch_size=batch_size, shuffle=True,pin_memory=True)
validation_loader = DataLoader(validation_data,batch_size=16,shuffle=True,pin_memory=True)
| 0
| 0
| 0
|
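A quick sketch of consuming the loaders defined above: each batch is a tuple of CIFAR-10 images and labels, with the images normalized to roughly [-1, 1] by the (0.5, 0.5, 0.5) Normalize transform.

images, labels = next(iter(training_loader))
print(images.shape)   # torch.Size([128, 3, 32, 32])
print(labels.shape)   # torch.Size([128])
print(images.min().item(), images.max().item())  # close to -1.0 and 1.0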
cc96cf4d5225718933a96ae5b76fe27a222e3d90
| 11,003
|
py
|
Python
|
eservice/pdo/sservice/block_store_manager.py
|
sambacha/private-data-objects
|
635049918b362ba81ad74469cbea6b2c53380d9e
|
[
"Apache-2.0"
] | 84
|
2018-05-04T15:07:53.000Z
|
2022-03-23T09:38:17.000Z
|
eservice/pdo/sservice/block_store_manager.py
|
sambacha/private-data-objects
|
635049918b362ba81ad74469cbea6b2c53380d9e
|
[
"Apache-2.0"
] | 218
|
2018-05-07T20:10:25.000Z
|
2022-03-23T17:27:44.000Z
|
eservice/pdo/sservice/block_store_manager.py
|
sambacha/private-data-objects
|
635049918b362ba81ad74469cbea6b2c53380d9e
|
[
"Apache-2.0"
] | 33
|
2018-03-02T20:32:18.000Z
|
2021-09-17T07:07:57.000Z
|
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""helper.py
This file defines a class to implement the various storage service
operations on the lmdb file.
"""
import base64
import hashlib
import lmdb
import struct
import time
import pdo.common.keys as keys
from pdo.service_client.storage import StorageException
import logging
logger = logging.getLogger(__name__)
class BlockMetadata(object) :
"""Implements a wrapper for block metadata.
"""
minimum_expiration_time = 60
    @classmethod
    def unpack(cls, value) :
        metadata = struct.unpack('LLLL', value)
        obj = cls()
        obj.block_size = metadata[0]
        obj.create_time = metadata[1]
        obj.expiration_time = metadata[2]
        obj.mark = metadata[3]
        return obj
    def __init__(self) :
        self.block_size = 0
        self.create_time = 0
        self.expiration_time = 0
        self.mark = 0
    def pack(self) :
        value = struct.pack('LLLL', self.block_size, self.create_time, self.expiration_time, self.mark)
        return value
class BlockStoreManager(object) :
"""Implements the storage service operations in a way that provides
symmetry with the storage service client.
"""
map_size = 1 << 40
def __init__(self, block_store_file, service_keys = None, create_block_store=False) :
"""Initialize storage service class instance
:param block_store_file string: name of the lmdb file used for block storage
:param service_keys ServiceKeys: ECDSA keys used to sign storage contracts
:param create_block_store boolean: flag to note that missing blockstore file should be created
"""
self.service_keys = service_keys
if self.service_keys is None :
self.service_keys = keys.ServiceKeys.create_service_keys()
self.block_store_env = lmdb.open(
block_store_file,
create=create_block_store,
max_dbs=2,
subdir=False,
sync=False,
map_size=self.map_size)
def close(self) :
"""Sync the database to disk and close the handles
"""
self.block_store_env.sync()
self.block_store_env.close()
self.block_store_env = None
def get_service_info(self) :
"""Return useful information about the service
:return dict: dictionary of information about the storage service
"""
return {'verifying_key' : self.service_keys.verifying_key }
def list_blocks(self, encoding='b64') :
"""Return a list of all block identifiers currently
stored in the database; mostly for debugging purposes
:param encoding string: encoding to use for block identifiers, raw/b64
:return list of string: list of block identifiers
"""
encoding_fn = lambda x : x
if encoding == 'b64' :
encoding_fn = lambda x : base64.urlsafe_b64encode(x).decode()
mdb = self.block_store_env.open_db(b'meta_data')
block_ids = []
with self.block_store_env.begin() as txn :
cursor = txn.cursor(db=mdb)
for key, value in cursor :
block_ids.append(encoding_fn(key))
return block_ids
def get_block(self, block_id, encoding='b64') :
"""Return the data for a block given the hash of the block
:param block_id string: block identifier
:param encoding string: encoding to use for block identifiers, raw/b64
:return string: block data
"""
decoding_fn = lambda x : x
if encoding == 'b64' :
decoding_fn = lambda x : base64.urlsafe_b64decode(x)
block_hash = decoding_fn(block_id)
bdb = self.block_store_env.open_db(b'block_data')
with self.block_store_env.begin() as txn :
block_data = txn.get(block_hash, db=bdb)
return block_data
# return block_data_list
    def __block_iterator__(self, block_ids, encoding) :
        for block_id in block_ids :
            yield self.get_block(block_id, encoding)
    def get_blocks(self, block_ids, encoding='b64') :
"""Return the data for a list of blocks
"""
# the iterator means that we don't have to use as much memory
# for operations that can process the blocks one at a time
return self.__block_iterator__(block_ids, encoding)
def store_block(self, block_data, expiration=60, encoding='b64') :
"""Add a new data block to the store
:param block_data string: binary content of the block
:param encoding string: encoding to use for block identifiers, raw/b64
:return string: block identifier
"""
return self.store_blocks([block_data], expiration, encoding)
def store_blocks(self, block_data_list, expiration=60, encoding='b64') :
"""Save a list of blocks in the store
:param iterable block_data_list: iterable collection of blocks to store
:param expiration int: number of seconds to use for expiration
:param encoding string: encoding to use for block identifiers, raw/b64
:return list of string: list of block identifiers
"""
encoding_fn = lambda x : x
if encoding == 'b64' :
encoding_fn = lambda x : base64.urlsafe_b64encode(x).decode()
current_time = int(time.time())
expiration_time = current_time + expiration
mdb = self.block_store_env.open_db(b'meta_data')
bdb = self.block_store_env.open_db(b'block_data')
block_hashes = []
# this might keep the database locked for too long for a write transaction
# might want to flip the order, one transaction per update
with self.block_store_env.begin(write=True) as txn :
for block_data in block_data_list :
block_hash = hashlib.sha256(block_data).digest()
block_hashes.append(block_hash)
# need to check to see if the block already exists, if it
# does then just extend the expiration time if necessary
raw_metadata = txn.get(block_hash, db=mdb)
if raw_metadata :
metadata = BlockMetadata.unpack(raw_metadata)
if expiration_time > metadata.expiration_time :
metadata.expiration_time = expiration_time
if not txn.put(block_hash, metadata.pack(), db=mdb, overwrite=True) :
raise StorageException("failed to update metadata")
continue
# this is a new block that needs to be added
metadata = BlockMetadata()
metadata.block_size = len(block_data)
metadata.create_time = current_time
metadata.expiration_time = expiration_time
metadata.mark = 0
if not txn.put(block_hash, metadata.pack(), db=mdb) :
raise StorageException("failed to save metadata")
if not txn.put(block_hash, block_data, db=bdb) :
raise StorageException("failed to save block data")
try :
# going to just concatenate all hashes, safe since these are all fixed size
signing_hash_accumulator = expiration.to_bytes(32, byteorder='big', signed=False)
signing_hash_accumulator += b''.join(block_hashes)
signing_hash = hashlib.sha256(signing_hash_accumulator).digest()
signature = self.service_keys.sign(signing_hash, encoding=encoding)
except Exception as e :
logger.error("unknown exception packing response (BlockStatus); %s", str(e))
return StorageException('signature failed')
result = dict()
result['signature'] = signature
result['block_ids'] = list(map(encoding_fn, block_hashes))
return result
def check_blocks(self, block_ids, encoding='b64') :
"""Check status of a list of block
:param block_ids list of string: block identifiers
:param encoding string: encoding to use for block identifiers, raw/b64
:return list of dict: list of block status
"""
decoding_fn = lambda x : x
if encoding == 'b64' :
decoding_fn = lambda x : base64.urlsafe_b64decode(x)
current_time = int(time.time())
mdb = self.block_store_env.open_db(b'meta_data')
block_status_list = []
with self.block_store_env.begin() as txn :
for block_id in block_ids :
# use the input format for the output block identifier
block_status = { 'block_id' : block_id, 'size' : 0, 'expiration' : 0 }
block_hash = decoding_fn(block_id)
raw_metadata = txn.get(block_hash, db=mdb)
if raw_metadata :
metadata = BlockMetadata.unpack(raw_metadata)
block_status['size'] = metadata.block_size
block_status['expiration'] = metadata.expiration_time - current_time
if block_status['expiration'] < 0 :
block_status['expiration'] = 0
block_status_list.append(block_status)
return block_status_list
def expire_blocks(self) :
"""Delete data and metadata for blocks that have expired
"""
try :
mdb = self.block_store_env.open_db(b'meta_data')
bdb = self.block_store_env.open_db(b'block_data')
current_time = int(time.time())
count = 0
with self.block_store_env.begin() as txn :
cursor = txn.cursor(db=mdb)
for key, value in cursor :
metadata = BlockMetadata.unpack(value)
if metadata.expiration_time < current_time :
logger.debug('expire block %s',base64.urlsafe_b64encode(key).decode())
count += 1
with self.block_store_env.begin(write=True) as dtxn :
assert dtxn.delete(key, db=bdb)
assert dtxn.delete(key, db=mdb)
logger.info('expired %d blocks', count)
except Exception as e :
logger.error('garbage collection failed; %s', str(e))
return None
return count
| 37.42517
| 103
| 0.624375
|
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""helper.py
This file defines a class to implement the various storage service
operations on the lmdb file.
"""
import base64
import hashlib
import lmdb
import struct
import time
import pdo.common.keys as keys
from pdo.service_client.storage import StorageException
import logging
logger = logging.getLogger(__name__)
class BlockMetadata(object) :
"""Implements a wrapper for block metadata.
"""
minimum_expiration_time = 60
@classmethod
def unpack(cls, value) :
metadata = struct.unpack('LLLL', value)
obj = cls()
obj.block_size = metadata[0]
obj.create_time = metadata[1]
obj.expiration_time = metadata[2]
obj.mark = metadata[3]
return obj
def __init__(self) :
self.block_size = 0
self.create_time = 0
self.expiration_time = 0
self.mark = 0
def pack(self) :
value = struct.pack('LLLL', self.block_size, self.create_time, self.expiration_time, self.mark)
return value
class BlockStoreManager(object) :
"""Implements the storage service operations in a way that provides
symmetry with the storage service client.
"""
map_size = 1 << 40
def __init__(self, block_store_file, service_keys = None, create_block_store=False) :
"""Initialize storage service class instance
:param block_store_file string: name of the lmdb file used for block storage
:param service_keys ServiceKeys: ECDSA keys used to sign storage contracts
:param create_block_store boolean: flag to note that missing blockstore file should be created
"""
self.service_keys = service_keys
if self.service_keys is None :
self.service_keys = keys.ServiceKeys.create_service_keys()
self.block_store_env = lmdb.open(
block_store_file,
create=create_block_store,
max_dbs=2,
subdir=False,
sync=False,
map_size=self.map_size)
def close(self) :
"""Sync the database to disk and close the handles
"""
self.block_store_env.sync()
self.block_store_env.close()
self.block_store_env = None
def get_service_info(self) :
"""Return useful information about the service
:return dict: dictionary of information about the storage service
"""
return {'verifying_key' : self.service_keys.verifying_key }
def list_blocks(self, encoding='b64') :
"""Return a list of all block identifiers currently
stored in the database; mostly for debugging purposes
:param encoding string: encoding to use for block identifiers, raw/b64
:return list of string: list of block identifiers
"""
encoding_fn = lambda x : x
if encoding == 'b64' :
encoding_fn = lambda x : base64.urlsafe_b64encode(x).decode()
mdb = self.block_store_env.open_db(b'meta_data')
block_ids = []
with self.block_store_env.begin() as txn :
cursor = txn.cursor(db=mdb)
for key, value in cursor :
block_ids.append(encoding_fn(key))
return block_ids
def get_block(self, block_id, encoding='b64') :
"""Return the data for a block given the hash of the block
:param block_id string: block identifier
:param encoding string: encoding to use for block identifiers, raw/b64
:return string: block data
"""
decoding_fn = lambda x : x
if encoding == 'b64' :
decoding_fn = lambda x : base64.urlsafe_b64decode(x)
block_hash = decoding_fn(block_id)
bdb = self.block_store_env.open_db(b'block_data')
with self.block_store_env.begin() as txn :
block_data = txn.get(block_hash, db=bdb)
return block_data
# return block_data_list
def __block_iterator__(self, block_ids, encoding) :
for block_id in block_ids :
yield self.get_block(block_id, encoding)
def get_blocks(self, block_ids, encoding='b64') :
"""Return the data for a list of blocks
"""
# the iterator means that we don't have to use as much memory
# for operations that can process the blocks one at a time
return self.__block_iterator__(block_ids, encoding)
def store_block(self, block_data, expiration=60, encoding='b64') :
"""Add a new data block to the store
:param block_data string: binary content of the block
:param encoding string: encoding to use for block identifiers, raw/b64
:return string: block identifier
"""
return self.store_blocks([block_data], expiration, encoding)
def store_blocks(self, block_data_list, expiration=60, encoding='b64') :
"""Save a list of blocks in the store
:param iterable block_data_list: iterable collection of blocks to store
:param expiration int: number of seconds to use for expiration
:param encoding string: encoding to use for block identifiers, raw/b64
:return list of string: list of block identifiers
"""
encoding_fn = lambda x : x
if encoding == 'b64' :
encoding_fn = lambda x : base64.urlsafe_b64encode(x).decode()
current_time = int(time.time())
expiration_time = current_time + expiration
mdb = self.block_store_env.open_db(b'meta_data')
bdb = self.block_store_env.open_db(b'block_data')
block_hashes = []
# this might keep the database locked for too long for a write transaction
# might want to flip the order, one transaction per update
with self.block_store_env.begin(write=True) as txn :
for block_data in block_data_list :
block_hash = hashlib.sha256(block_data).digest()
block_hashes.append(block_hash)
# need to check to see if the block already exists, if it
# does then just extend the expiration time if necessary
raw_metadata = txn.get(block_hash, db=mdb)
if raw_metadata :
metadata = BlockMetadata.unpack(raw_metadata)
if expiration_time > metadata.expiration_time :
metadata.expiration_time = expiration_time
if not txn.put(block_hash, metadata.pack(), db=mdb, overwrite=True) :
raise StorageException("failed to update metadata")
continue
# this is a new block that needs to be added
metadata = BlockMetadata()
metadata.block_size = len(block_data)
metadata.create_time = current_time
metadata.expiration_time = expiration_time
metadata.mark = 0
if not txn.put(block_hash, metadata.pack(), db=mdb) :
raise StorageException("failed to save metadata")
if not txn.put(block_hash, block_data, db=bdb) :
raise StorageException("failed to save block data")
try :
# going to just concatenate all hashes, safe since these are all fixed size
signing_hash_accumulator = expiration.to_bytes(32, byteorder='big', signed=False)
signing_hash_accumulator += b''.join(block_hashes)
signing_hash = hashlib.sha256(signing_hash_accumulator).digest()
signature = self.service_keys.sign(signing_hash, encoding=encoding)
except Exception as e :
logger.error("unknown exception packing response (BlockStatus); %s", str(e))
return StorageException('signature failed')
result = dict()
result['signature'] = signature
result['block_ids'] = list(map(encoding_fn, block_hashes))
return result
def check_block(self, block_id, encoding='b64') :
pass
def check_blocks(self, block_ids, encoding='b64') :
"""Check status of a list of block
:param block_ids list of string: block identifiers
:param encoding string: encoding to use for block identifiers, raw/b64
:return list of dict: list of block status
"""
decoding_fn = lambda x : x
if encoding == 'b64' :
decoding_fn = lambda x : base64.urlsafe_b64decode(x)
current_time = int(time.time())
mdb = self.block_store_env.open_db(b'meta_data')
block_status_list = []
with self.block_store_env.begin() as txn :
for block_id in block_ids :
# use the input format for the output block identifier
block_status = { 'block_id' : block_id, 'size' : 0, 'expiration' : 0 }
block_hash = decoding_fn(block_id)
raw_metadata = txn.get(block_hash, db=mdb)
if raw_metadata :
metadata = BlockMetadata.unpack(raw_metadata)
block_status['size'] = metadata.block_size
block_status['expiration'] = metadata.expiration_time - current_time
if block_status['expiration'] < 0 :
block_status['expiration'] = 0
block_status_list.append(block_status)
return block_status_list
def expire_blocks(self) :
"""Delete data and metadata for blocks that have expired
"""
try :
mdb = self.block_store_env.open_db(b'meta_data')
bdb = self.block_store_env.open_db(b'block_data')
current_time = int(time.time())
count = 0
with self.block_store_env.begin() as txn :
cursor = txn.cursor(db=mdb)
for key, value in cursor :
metadata = BlockMetadata.unpack(value)
if metadata.expiration_time < current_time :
logger.debug('expire block %s',base64.urlsafe_b64encode(key).decode())
count += 1
with self.block_store_env.begin(write=True) as dtxn :
assert dtxn.delete(key, db=bdb)
assert dtxn.delete(key, db=mdb)
logger.info('expired %d blocks', count)
except Exception as e :
logger.error('garbage collection failed; %s', str(e))
return None
return count
| 631
| 0
| 133
|
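A minimal usage sketch for the BlockStoreManager above; the lmdb file path is an assumption, and the default b64 encoding is used throughout.

bsm = BlockStoreManager("/tmp/blocks.mdb", create_block_store=True)  # hypothetical path

# store two blocks for 120 seconds; the result carries the signed block ids
result = bsm.store_blocks([b"hello", b"world"], expiration=120)
block_ids = result['block_ids']

# report size and seconds-to-expiration for each block
for status in bsm.check_blocks(block_ids):
    print(status['block_id'], status['size'], status['expiration'])

print(bsm.get_block(block_ids[0]))  # b'hello'

bsm.expire_blocks()   # garbage-collect anything past its expiration time
bsm.close()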
c47aef6a568a84a910fe9d901ba372f17176b9e3
| 1,435
|
py
|
Python
|
pymc/examples/gp/PyMCmodel.py
|
rsumner31/pymc3-23
|
539c0fc04c196679a1cdcbf4bc2dbea4dee10080
|
[
"Apache-2.0"
] | 1
|
2019-03-01T02:47:20.000Z
|
2019-03-01T02:47:20.000Z
|
pymc/examples/gp/PyMCmodel.py
|
rsumner31/pymc3-23
|
539c0fc04c196679a1cdcbf4bc2dbea4dee10080
|
[
"Apache-2.0"
] | 1
|
2019-08-17T06:58:38.000Z
|
2019-08-17T06:58:38.000Z
|
pymc/examples/gp/PyMCmodel.py
|
rsumner31/pymc3-23
|
539c0fc04c196679a1cdcbf4bc2dbea4dee10080
|
[
"Apache-2.0"
] | null | null | null |
import pymc as pm
import pymc.gp as gp
from pymc.gp.cov_funs import matern
import numpy as np
import matplotlib.pyplot as pl
from numpy.random import normal
x = np.arange(-1.,1.,.1)
# Prior parameters of C
diff_degree = pm.Uniform('diff_degree', .1, 3)
amp = pm.Lognormal('amp', mu=.4, tau=1.)
scale = pm.Lognormal('scale', mu=.5, tau=1.)
# The covariance dtrm C is valued as a Covariance object.
@pm.deterministic
def C(eval_fun = gp.matern.euclidean, diff_degree=diff_degree, amp=amp, scale=scale):
    return gp.FullRankCovariance(eval_fun, diff_degree=diff_degree, amp=amp, scale=scale)
# Prior parameters of M
a = pm.Normal('a', mu=1., tau=1.)
b = pm.Normal('b', mu=.5, tau=1.)
c = pm.Normal('c', mu=2., tau=1.)
# The mean M is valued as a Mean object.
def linfun(x, a, b, c):
    # return a * x ** 2 + b * x + c
    return 0.*x + c
@pm.deterministic
def M(eval_fun = linfun, a=a, b=b, c=c):
    return gp.Mean(eval_fun, a=a, b=b, c=c)
# The GP itself
fmesh = np.linspace(-np.pi/3.3,np.pi/3.3,4)
f = gp.GP(name="f", M=M, C=C, mesh=fmesh, init_mesh_vals = 0.*fmesh)
# Observation precision
# V = Gamma('V', alpha=3., beta=3./.002, value=.002)
V = .0001
# The data d is just array-valued. It's normally distributed about GP.f(obs_x).
@pm.observed
@pm.stochastic
def d(value=np.random.normal(size=len(fmesh)), mu=f, V=V):
"""
Data
"""
mu_eval = mu(fmesh)
return pm.flib.normal(value, mu_eval, 1./V)
| 25.625
| 89
| 0.65993
|
import pymc as pm
import pymc.gp as gp
from pymc.gp.cov_funs import matern
import numpy as np
import matplotlib.pyplot as pl
from numpy.random import normal
x = np.arange(-1.,1.,.1)
# Prior parameters of C
diff_degree = pm.Uniform('diff_degree', .1, 3)
amp = pm.Lognormal('amp', mu=.4, tau=1.)
scale = pm.Lognormal('scale', mu=.5, tau=1.)
# The covariance dtrm C is valued as a Covariance object.
@pm.deterministic
def C(eval_fun = gp.matern.euclidean, diff_degree=diff_degree, amp=amp, scale=scale):
return gp.FullRankCovariance(eval_fun, diff_degree=diff_degree, amp=amp, scale=scale)
# Prior parameters of M
a = pm.Normal('a', mu=1., tau=1.)
b = pm.Normal('b', mu=.5, tau=1.)
c = pm.Normal('c', mu=2., tau=1.)
# The mean M is valued as a Mean object.
def linfun(x, a, b, c):
# return a * x ** 2 + b * x + c
return 0.*x + c
@pm.deterministic
def M(eval_fun = linfun, a=a, b=b, c=c):
return gp.Mean(eval_fun, a=a, b=b, c=c)
# The GP itself
fmesh = np.linspace(-np.pi/3.3,np.pi/3.3,4)
f = gp.GP(name="f", M=M, C=C, mesh=fmesh, init_mesh_vals = 0.*fmesh)
# Observation precision
# V = Gamma('V', alpha=3., beta=3./.002, value=.002)
V = .0001
# The data d is just array-valued. It's normally distributed about GP.f(obs_x).
@pm.observed
@pm.stochastic
def d(value=np.random.normal(size=len(fmesh)), mu=f, V=V):
"""
Data
"""
mu_eval = mu(fmesh)
return pm.flib.normal(value, mu_eval, 1./V)
| 275
| 0
| 66
|
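A hedged sketch of how this PyMC 2.x model module might be sampled; the module name and traced variable are assumptions, and GP-specific step methods (e.g. gp.GPEvaluationGibbs) may be needed for the f variable to mix well.

import pymc as pm
import PyMCmodel  # hypothetical import of the module above

sampler = pm.MCMC(PyMCmodel)
sampler.sample(iter=5000, burn=1000, thin=10)
print(sampler.trace('amp')[:].mean())  # posterior mean of the amplitude parameter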
1ff0470a82f8e325dec69db737d3ded8bf488a17
| 734
|
py
|
Python
|
HW4/PyMaxflow-master/examples/simple.py
|
ardaduz/math-cgv
|
bc89c0ce9beca9a9f02ca23bcf4a9116be187882
|
[
"MIT"
] | null | null | null |
HW4/PyMaxflow-master/examples/simple.py
|
ardaduz/math-cgv
|
bc89c0ce9beca9a9f02ca23bcf4a9116be187882
|
[
"MIT"
] | null | null | null |
HW4/PyMaxflow-master/examples/simple.py
|
ardaduz/math-cgv
|
bc89c0ce9beca9a9f02ca23bcf4a9116be187882
|
[
"MIT"
] | 1
|
2021-02-14T10:41:17.000Z
|
2021-02-14T10:41:17.000Z
|
import maxflow
# Create a graph with integer capacities.
g = maxflow.Graph[int](2, 2)
# Add two (non-terminal) nodes. Get the index to the first one.
nodes = g.add_nodes(2)
# Create two edges (forwards and backwards) with the given capacities.
# The indices of the nodes are always consecutive.
g.add_edge(nodes[0], nodes[1], 1, 2)
# Set the capacities of the terminal edges...
# ...for the first node.
g.add_tedge(nodes[0], 2, 5)
# ...for the second node.
g.add_tedge(nodes[1], 9, 4)
# Find the maxflow.
flow = g.maxflow()
print("Maximum flow: {}".format(flow))
# Print the segment of each node.
print("Segment of the node 0: {}".format(g.get_segment(nodes[0])))
print("Segment of the node 1: {}".format(g.get_segment(nodes[1])))
| 31.913043
| 70
| 0.701635
|
import maxflow
# Create a graph with integer capacities.
g = maxflow.Graph[int](2, 2)
# Add two (non-terminal) nodes. Get the index to the first one.
nodes = g.add_nodes(2)
# Create two edges (forwards and backwards) with the given capacities.
# The indices of the nodes are always consecutive.
g.add_edge(nodes[0], nodes[1], 1, 2)
# Set the capacities of the terminal edges...
# ...for the first node.
g.add_tedge(nodes[0], 2, 5)
# ...for the second node.
g.add_tedge(nodes[1], 9, 4)
# Find the maxflow.
flow = g.maxflow()
print("Maximum flow: {}".format(flow))
# Print the segment of each node.
print("Segment of the node 0: {}".format(g.get_segment(nodes[0])))
print("Segment of the node 1: {}".format(g.get_segment(nodes[1])))
| 0
| 0
| 0
|
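For reference, the maximum flow of the tiny two-node graph above works out to 8, accounted for by the augmenting paths below; the exact integers returned by get_segment are left to PyMaxflow, but node 1 should fall on the source side of the minimum cut and node 0 on the sink side.

# s -> 1 -> t        carries 4  (limited by the 1 -> t capacity of 4)
# s -> 0 -> t        carries 2  (limited by the s -> 0 capacity of 2)
# s -> 1 -> 0 -> t   carries 2  (limited by the 1 -> 0 capacity of 2)
# total = 8, matching the minimum cut {s->0 (2), 1->t (4), 1->0 (2)} = 8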
bb753e65a660ad6b0da4c898f341c53f5a413d54
| 395
|
py
|
Python
|
storm_analysis/diagnostics/frc/analyze_data.py
|
oxfordni/storm-analysis
|
835a5c17497c563c3632db561ae7e7c9144a8dd1
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/diagnostics/frc/analyze_data.py
|
oxfordni/storm-analysis
|
835a5c17497c563c3632db561ae7e7c9144a8dd1
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/diagnostics/frc/analyze_data.py
|
oxfordni/storm-analysis
|
835a5c17497c563c3632db561ae7e7c9144a8dd1
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/env python
"""
Analyze FRC data.
Hazen 01/18
"""
import glob
import storm_analysis.frc.frc_calc2d as frcCalc2d
dirs = sorted(glob.glob("test*"))
total_time = 0.0
for a_dir in dirs:
print()
print("Analyzing:", a_dir)
print()
hdf5 = a_dir + "/test.hdf5"
frc_text = a_dir + "/frc.txt"
# Run FRC analysis.
frcCalc2d.frcCalc2d(hdf5, frc_text)
print()
| 15.8
| 49
| 0.643038
|
#!/usr/bin/env python
"""
Analyze FRC data.
Hazen 01/18
"""
import glob
import storm_analysis.frc.frc_calc2d as frcCalc2d
dirs = sorted(glob.glob("test*"))
total_time = 0.0
for a_dir in dirs:
print()
print("Analyzing:", a_dir)
print()
hdf5 = a_dir + "/test.hdf5"
frc_text = a_dir + "/frc.txt"
# Run FRC analysis.
frcCalc2d.frcCalc2d(hdf5, frc_text)
print()
| 0
| 0
| 0
|