Dataset columns (type and value/length range; "nullable" marks columns containing nulls, shown as ⌀ in the original dump):

| column | type / stats |
|---|---|
| hexsha | string, length 40-40 |
| size | int64, 4 - 1.02M |
| ext | string, 8 classes |
| lang | string, 1 class |
| max_stars_repo_path | string, length 4-209 |
| max_stars_repo_name | string, length 5-121 |
| max_stars_repo_head_hexsha | string, length 40-40 |
| max_stars_repo_licenses | list, length 1-10 |
| max_stars_count | int64, 1 - 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string, length 24-24, nullable |
| max_stars_repo_stars_event_max_datetime | string, length 24-24, nullable |
| max_issues_repo_path | string, length 4-209 |
| max_issues_repo_name | string, length 5-121 |
| max_issues_repo_head_hexsha | string, length 40-40 |
| max_issues_repo_licenses | list, length 1-10 |
| max_issues_count | int64, 1 - 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string, length 24-24, nullable |
| max_issues_repo_issues_event_max_datetime | string, length 24-24, nullable |
| max_forks_repo_path | string, length 4-209 |
| max_forks_repo_name | string, length 5-121 |
| max_forks_repo_head_hexsha | string, length 40-40 |
| max_forks_repo_licenses | list, length 1-10 |
| max_forks_count | int64, 1 - 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string, length 24-24, nullable |
| max_forks_repo_forks_event_max_datetime | string, length 24-24, nullable |
| content | string, length 4 - 1.02M |
| avg_line_length | float64, 1.07 - 66.1k |
| max_line_length | int64, 4 - 266k |
| alphanum_fraction | float64, 0.01 - 1 |

Each record below lists its metadata, then the file content, then its line statistics.
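Each row of this dump is one source file plus repository metadata. Purely as an illustration of the schema above, the sketch below filters such rows with pandas; the file name files.parquet is a hypothetical local export, since the dump does not name its source dataset.

```python
# Illustration only: filtering rows that follow the schema above with pandas.
# "files.parquet" is a hypothetical local export of this split.
import pandas as pd

df = pd.read_parquet("files.parquet")

# Keep small, text-heavy Python files from MIT-licensed repositories.
mask = (
    (df["ext"] == "py")
    & (df["size"] < 10_000)
    & (df["alphanum_fraction"] > 0.5)
    & df["max_stars_repo_licenses"].apply(lambda licenses: "MIT" in licenses)
)
for _, row in df[mask].head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```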
hexsha: 369f049ae0b13fdb9e96a59e13ac26b4fcb70360 | size: 682 | ext: py | lang: Python
max_stars: repo_path=build/common/commands/console.py, repo_name=paulator/frappe_docker, head_hexsha=2eecd1311dd427d94ba86b80253441444ff4e078, licenses=["MIT"], count=1, event_min=2020-10-20T15:16:13.000Z, event_max=2020-10-20T15:16:13.000Z
max_issues / max_forks: repo_path=build/common/commands/console.py, repo_name=paulator/frappe_docker, head_hexsha=2eecd1311dd427d94ba86b80253441444ff4e078, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import sys
import frappe
import IPython
from frappe.utils import get_sites
def console(site):
"Start ipython console for a site"
if site not in get_sites():
print("Site {0} does not exist on the current bench".format(site))
return
frappe.init(site=site)
frappe.connect()
frappe.local.lang = frappe.db.get_default("lang")
all_apps = frappe.get_installed_apps()
for app in all_apps:
locals()[app] = __import__(app)
print("Apps in this namespace:\n{}".format(", ".join(all_apps)))
IPython.embed(display_banner="", header="")
def main():
site = sys.argv[-1]
console(site)
if __name__ == "__main__":
main()
avg_line_length: 22 | max_line_length: 74 | alphanum_fraction: 0.653959
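The console.py record above is a small CLI helper: it checks the requested site against get_sites(), initializes Frappe for that site, imports every installed app into the interactive namespace, and drops into IPython. A minimal usage sketch follows; the site name "mysite" is hypothetical, and it assumes the file is importable as a module named console.

```python
# Sketch only: driving the console helper programmatically.
# Assumes console.py is importable as a module named "console";
# "mysite" is a hypothetical site name on the current bench.
from console import console

console("mysite")  # roughly equivalent to: python console.py mysite
```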
hexsha: abbd879fc396ce6a8498b092fe88de6981528ff7 | size: 568 | ext: py | lang: Python
max_stars / max_issues / max_forks: repo_path=PyInstaller/hooks/rthooks/pyi_rth_gi.py, repo_name=BearerPipelineTest/pyinstaller, head_hexsha=0de9d6cf1701689c53161610acdab143a76d40b5, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
#-----------------------------------------------------------------------------
# Copyright (c) 2015-2022, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
import os
import sys
os.environ['GI_TYPELIB_PATH'] = os.path.join(sys._MEIPASS, 'gi_typelibs')
avg_line_length: 35.5 | max_line_length: 78 | alphanum_fraction: 0.542254
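The pyi_rth_gi.py record above is a PyInstaller runtime hook: at program start it points GObject Introspection at typelibs bundled under the bootloader's extraction directory, sys._MEIPASS. The sketch below shows the same pattern for a generic bundled resource; the directory name my_resources and the variable MYAPP_RESOURCE_PATH are made up for illustration.

```python
# Sketch of the runtime-hook pattern above, for a hypothetical resource directory.
import os
import sys

# The PyInstaller bootloader sets sys._MEIPASS to its extraction directory;
# fall back to the script directory when running unfrozen.
bundle_dir = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)))
os.environ["MYAPP_RESOURCE_PATH"] = os.path.join(bundle_dir, "my_resources")
```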
hexsha: af8794b26d6a40516a5bad70c9168899af32d479 | size: 1,767 | ext: py | lang: Python
max_stars / max_issues / max_forks: repo_path=onlinecourse/migrations/0002_auto_20220301_1522.py, repo_name=nirjharlo/django-edx, head_hexsha=a4f7382e39e1a9e6733b9b80a026ba2f95f4f2d0, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
# Generated by Django 3.1.3 on 2022-03-01 15:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=1028)),
('is_correct', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choices', models.ManyToManyField(to='onlinecourse.Choice')),
('enrollment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.enrollment')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=1028)),
('grade', models.IntegerField(default=0)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
('lesson_id', models.ManyToManyField(to='onlinecourse.Lesson')),
],
),
migrations.AddField(
model_name='choice',
name='questions',
field=models.ManyToManyField(to='onlinecourse.Question'),
),
]
avg_line_length: 38.413043 | max_line_length: 125 | alphanum_fraction: 0.582909
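The migration record above was auto-generated by Django, so the models it implies can be read back from its CreateModel and AddField operations. Below is a hedged reconstruction of the corresponding models.py; field names and types follow the migration, but this is an inference rather than the project's actual source, and the Course, Lesson and Enrollment models referenced by the relations are assumed to already exist in the onlinecourse app.

```python
# Inferred from the migration operations above; not the project's actual models.py.
from django.db import models


class Question(models.Model):
    course = models.ForeignKey('onlinecourse.Course', on_delete=models.CASCADE)
    lesson_id = models.ManyToManyField('onlinecourse.Lesson')
    question_text = models.CharField(max_length=1028)
    grade = models.IntegerField(default=0)


class Choice(models.Model):
    questions = models.ManyToManyField(Question)
    choice_text = models.CharField(max_length=1028)
    is_correct = models.IntegerField(default=0)


class Submission(models.Model):
    enrollment = models.ForeignKey('onlinecourse.Enrollment', on_delete=models.CASCADE)
    choices = models.ManyToManyField(Choice)
```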
hexsha: 986e3336ba174956d31f86d8ff6a9d817581fc64 | size: 186 | ext: py | lang: Python
max_stars / max_issues / max_forks: repo_path=myhdfs/errors.py, repo_name=Conchsk/mlapt, head_hexsha=1b88b50ee1c812d86affc2f2fc5fc108dc9f1022, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
class FileNotFound(Exception):
pass
class UnsupportedMode(Exception):
pass
class UnsupportedOperation(Exception):
pass
class InvalidParameterValue(Exception):
pass
avg_line_length: 12.4 | max_line_length: 39 | alphanum_fraction: 0.752688
hexsha: 3d6aadfec919b9ac114a2f51a86ca57e70b4e239 | size: 490 | ext: py | lang: Python
max_stars / max_issues / max_forks: repo_path=lib/android/service.py, repo_name=TE-ToshiakiTanaka/stve-project, head_hexsha=822985d0daad7cb63b96f4b0a64539931aebf06e, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
__version__ = (0, 2, 0)
import os
import sys
LIB_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if not LIB_PATH in sys.path:
sys.path.insert(0, LIB_PATH)
from android import module
from android.module import Android
class Factory(object):
def __init__(self):
pass
def version(self):
return __version__
def get(self, serial, host=module.PROFILE_PATH):
return Android(serial, host)
NAME = "stvex.android"
FACTORY = Factory()
avg_line_length: 19.6 | max_line_length: 70 | alphanum_fraction: 0.697959
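The service.py record above exposes a small factory: Factory.get(serial, host) wraps android.module.Android for a given device serial, and the module publishes a ready-made FACTORY instance. A usage sketch, assuming the file is importable as service and using a made-up device serial:

```python
# Sketch only; assumes service.py is importable as "service",
# and "emulator-5554" is a hypothetical device serial.
from service import FACTORY

print(FACTORY.version())               # -> (0, 2, 0)
device = FACTORY.get("emulator-5554")  # returns an android.module.Android instance
```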
hexsha: 79e013d8ae35ce96f9a69ecc43b7c209bdcb65bb | size: 3,652 | ext: py | lang: Python
max_stars / max_issues / max_forks: repo_path=tests/importer/test_springer_dojson.py, repo_name=kpsherva/cds-ils, head_hexsha=8eeeb6e03784756ed24895c8d030682f9d733e8a, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import os
from cds_dojson.marc21.utils import create_record
from cds_ils.importer.providers.springer.springer import model
marcxml = (
"""<collection xmlns="http://www.loc.gov/MARC21/slim">"""
"""<record>{0}</record></collection>"""
)
def check_transformation(marcxml_body, json_body):
"""Check transformation."""
blob = create_record(marcxml.format(marcxml_body))
record = {}
record.update(**model.do(blob, ignore_missing=True))
expected = {}
expected.update(**json_body)
assert record == expected
def test_springer_transformation(app):
"""Test springer record import translation."""
dirname = os.path.join(os.path.dirname(__file__), "data")
with open(os.path.join(dirname, "springer_record.xml"), "r") as fp:
example = fp.read()
with app.app_context():
check_transformation(
example,
{
"_eitem": {
"internal_note": "Physics and Astronomy (R0) "
"(SpringerNature-43715)",
"urls": [
{
"description": "E-book by Springer",
"value": "https://doi.org/10.1007/b100336",
}
],
},
"provider_recid": "978-0-306-47915-1",
"_serial": [
{
"title": "Advances in Nuclear Physics ;",
"volume": "26",
}
],
"abstract": "The four articles ...",
"agency_code": "DE-He213",
"alternative_titles": [
{"type": "SUBTITLE", "value": "Volume 26 /"}
],
"alternative_identifiers": [
{"scheme": "SPRINGER", "value": "978-0-306-47915-1"}
],
"authors": [
{"full_name": "Negele, J.W.", "roles": ["EDITOR"]},
{"full_name": "Vogt, Erich W.", "roles": ["EDITOR"]},
],
"document_type": "BOOK",
"edition": "1st 2001.",
"identifiers": [
{"scheme": "ISBN", "value": "9780306479151"},
{"scheme": "ISBN", "value": "9780306479151X"},
],
"imprint": {
"date": "2001.",
"place": "New York, NY :",
"publisher": "Springer US :, Imprint: Springer,",
},
"keywords": [
{"source": "SPR", "value": "Nuclear physics."},
{"source": "SPR", "value": "Heavy ions."},
{
"source": "SPR",
"value": "Nuclear Physics, Heavy Ions, Hadrons.",
},
],
"number_of_pages": "386",
"publication_year": "2001.",
"subjects": [
{"scheme": "LoC", "value": "QC770-798"},
{"scheme": "LoC", "value": "QC702.7.H42"},
{"scheme": "Dewey", "value": "539.7092"},
],
"table_of_content": [
"The Spin Structure of the Nucleon",
"Liquid-Gas Phase Transition in Nuclear "
"Multifragmentation",
"High Spin Properties of Atomic Nuclei",
"The Deuteron: Structure and Form Factors.",
],
"title": "Advances in Nuclear Physics",
},
)
avg_line_length: 36.52 | max_line_length: 73 | alphanum_fraction: 0.426342
hexsha: 9a44cc781765ed39ed4b98bf19f384db03570ab6 | size: 57,633 | ext: py | lang: Python
max_stars / max_issues / max_forks: repo_path=cirq-core/cirq/ops/pauli_string.py, repo_name=mkeshita/Cirq, head_hexsha=06b5a5bd929757bf4bafe32b16bc4fabb36f3390, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import math
import numbers
from typing import (
Any,
cast,
Dict,
ItemsView,
Iterable,
Iterator,
KeysView,
List,
Mapping,
Optional,
overload,
Sequence,
SupportsComplex,
Tuple,
TYPE_CHECKING,
TypeVar,
Union,
ValuesView,
AbstractSet,
Callable,
Generic,
)
import numpy as np
from cirq import value, protocols, linalg, qis
from cirq._doc import document
from cirq._import import LazyLoader
from cirq.ops import (
clifford_gate,
common_gates,
gate_operation,
global_phase_op,
identity,
op_tree,
pauli_gates,
pauli_interaction_gate,
raw_types,
)
from cirq.type_workarounds import NotImplementedType
if TYPE_CHECKING:
import cirq
# Lazy imports to break circular dependencies.
linear_combinations = LazyLoader("linear_combinations", globals(), "cirq.ops.linear_combinations")
TDefault = TypeVar('TDefault')
TKey = TypeVar('TKey', bound=raw_types.Qid)
TKeyNew = TypeVar('TKeyNew', bound=raw_types.Qid)
TKeyOther = TypeVar('TKeyOther', bound=raw_types.Qid)
# A value that can be unambiguously converted into a `cirq.PauliString`.
PAULI_STRING_LIKE = Union[
complex,
'cirq.OP_TREE',
Mapping[TKey, 'cirq.PAULI_GATE_LIKE'],
Iterable, # of PAULI_STRING_LIKE, but mypy doesn't do recursive types yet.
]
document(
PAULI_STRING_LIKE, # type: ignore
"""A `cirq.PauliString` or a value that can easily be converted into one.
Complex numbers turn into the coefficient of an empty Pauli string.
Dictionaries from qubit to Pauli operation are wrapped into a Pauli string.
Each Pauli operation can be specified as a cirq object (e.g. `cirq.X`) or as
a string (e.g. `"X"`) or as an integer where 0=I, 1=X, 2=Y, 3=Z.
Collections of Pauli operations are recursively multiplied into a single
Pauli string.
""",
)
PAULI_GATE_LIKE = Union[
'cirq.Pauli',
'cirq.IdentityGate',
str,
int,
]
document(
PAULI_GATE_LIKE, # type: ignore
"""An object that can be interpreted as a Pauli gate.
Allowed values are:
1. Cirq gates: `cirq.I`, `cirq.X`, `cirq.Y`, `cirq.Z`.
2. Strings: "I", "X", "Y", "Z". Equivalently "i", "x", "y", "z".
3. Integers from 0 to 3, with the convention 0=I, 1=X, 2=Y, 3=Z.
""",
)
@value.value_equality(approximate=True, manual_cls=True)
class PauliString(raw_types.Operation, Generic[TKey]):
def __init__(
self,
*contents: 'cirq.PAULI_STRING_LIKE',
qubit_pauli_map: Optional[Dict[TKey, 'cirq.Pauli']] = None,
coefficient: Union[int, float, complex] = 1,
):
"""Initializes a new PauliString.
Examples:
>>> a, b, c = cirq.LineQubit.range(3)
>>> print(cirq.PauliString([cirq.X(a), cirq.X(a)]))
I
>>> print(cirq.PauliString(-1, cirq.X(a), cirq.Y(b), cirq.Z(c)))
-X(0)*Y(1)*Z(2)
>>> print(cirq.PauliString({a: cirq.X}, [-2, 3, cirq.Y(a)]))
-6j*Z(0)
>>> print(cirq.PauliString({a: cirq.I, b: cirq.X}))
X(1)
>>> print(cirq.PauliString({a: cirq.Y},
... qubit_pauli_map={a: cirq.X}))
1j*Z(0)
Args:
*contents: A value or values to convert into a pauli string. This
can be a number, a pauli operation, a dictionary from qubit to
pauli/identity gates, or collections thereof. If a list of
values is given, they are each individually converted and then
multiplied from left to right in order.
qubit_pauli_map: Initial dictionary mapping qubits to pauli
operations. Defaults to the empty dictionary. Note that, unlike
dictionaries passed to contents, this dictionary must not
contain any identity gate values. Further note that this
argument specifies values that are logically *before* factors
specified in `contents`; `contents` are *right* multiplied onto
the values in this dictionary.
coefficient: Initial scalar coefficient. Defaults to 1.
Raises:
TypeError: If the `qubit_pauli_map` has values that are not Paulis.
"""
if qubit_pauli_map is not None:
for v in qubit_pauli_map.values():
if not isinstance(v, pauli_gates.Pauli):
raise TypeError(f'{v} is not a Pauli')
self._qubit_pauli_map: Dict[TKey, 'cirq.Pauli'] = qubit_pauli_map or {}
self._coefficient = complex(coefficient)
if contents:
m = self.mutable_copy().inplace_left_multiply_by(contents).frozen()
self._qubit_pauli_map = m._qubit_pauli_map
self._coefficient = m._coefficient
@property
def coefficient(self) -> complex:
return self._coefficient
def _value_equality_values_(self):
if len(self._qubit_pauli_map) == 1 and self.coefficient == 1:
q, p = list(self._qubit_pauli_map.items())[0]
return gate_operation.GateOperation(p, [q])._value_equality_values_()
return (frozenset(self._qubit_pauli_map.items()), self._coefficient)
def _json_dict_(self) -> Dict[str, Any]:
return {
# JSON requires mappings to have string keys.
'qubit_pauli_map': list(self._qubit_pauli_map.items()),
'coefficient': self.coefficient,
}
@classmethod
def _from_json_dict_(cls, qubit_pauli_map, coefficient, **kwargs):
return cls(qubit_pauli_map=dict(qubit_pauli_map), coefficient=coefficient)
def _value_equality_values_cls_(self):
if len(self._qubit_pauli_map) == 1 and self.coefficient == 1:
return gate_operation.GateOperation
return PauliString
def equal_up_to_coefficient(self, other: 'cirq.PauliString') -> bool:
return self._qubit_pauli_map == other._qubit_pauli_map
def __getitem__(self, key: TKey) -> pauli_gates.Pauli:
return self._qubit_pauli_map[key]
# pylint: disable=function-redefined
@overload
def get(self, key: Any, default: None = None) -> pauli_gates.Pauli:
pass
@overload
def get(self, key: Any, default: TDefault) -> Union[pauli_gates.Pauli, TDefault]:
pass
def get(self, key: Any, default=None):
return self._qubit_pauli_map.get(key, default)
@overload
def __mul__( # type: ignore
self, other: 'cirq.PauliString[TKeyOther]'
) -> 'cirq.PauliString[Union[TKey, TKeyOther]]':
pass
@overload
def __mul__(
self, other: Mapping[TKeyOther, 'cirq.PAULI_GATE_LIKE']
) -> 'cirq.PauliString[Union[TKey, TKeyOther]]':
pass
@overload
def __mul__(
self, other: Iterable['cirq.PAULI_STRING_LIKE']
) -> 'cirq.PauliString[Union[TKey, cirq.Qid]]':
pass
@overload
def __mul__(self, other: 'cirq.Operation') -> 'cirq.PauliString[Union[TKey, cirq.Qid]]':
pass
@overload
def __mul__(
self, other: Union[complex, int, float, numbers.Number]
) -> 'cirq.PauliString[TKey]':
pass
def __mul__(self, other):
known = False
if isinstance(other, raw_types.Operation) and isinstance(other.gate, identity.IdentityGate):
known = True
elif isinstance(other, (PauliString, numbers.Number)):
known = True
if known:
return PauliString(
cast(PAULI_STRING_LIKE, other),
qubit_pauli_map=self._qubit_pauli_map,
coefficient=self.coefficient,
)
return NotImplemented
# pylint: enable=function-redefined
@property
def gate(self) -> 'cirq.DensePauliString':
order: List[Optional[pauli_gates.Pauli]] = [
None,
pauli_gates.X,
pauli_gates.Y,
pauli_gates.Z,
]
from cirq.ops.dense_pauli_string import DensePauliString
return DensePauliString(
coefficient=self.coefficient, pauli_mask=[order.index(self[q]) for q in self.qubits]
)
def __rmul__(self, other) -> 'PauliString':
if isinstance(other, numbers.Number):
return PauliString(
qubit_pauli_map=self._qubit_pauli_map,
coefficient=self._coefficient * complex(cast(SupportsComplex, other)),
)
if isinstance(other, raw_types.Operation) and isinstance(other.gate, identity.IdentityGate):
return self
# Note: PauliString case handled by __mul__.
return NotImplemented
def __truediv__(self, other):
if isinstance(other, numbers.Number):
return PauliString(
qubit_pauli_map=self._qubit_pauli_map,
coefficient=self._coefficient / complex(cast(SupportsComplex, other)),
)
return NotImplemented
def __add__(self, other):
return linear_combinations.PauliSum.from_pauli_strings(self).__add__(other)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return linear_combinations.PauliSum.from_pauli_strings(self).__sub__(other)
def __rsub__(self, other):
return -self.__sub__(other)
def __contains__(self, key: TKey) -> bool:
return key in self._qubit_pauli_map
def _decompose_(self):
if not self._has_unitary_():
return None
return [
*(
[]
if self.coefficient == 1
else [global_phase_op.global_phase_operation(self.coefficient)]
),
*[self[q].on(q) for q in self.qubits],
]
def keys(self) -> KeysView[TKey]:
return self._qubit_pauli_map.keys()
@property
def qubits(self) -> Tuple[TKey, ...]:
return tuple(self.keys())
def _circuit_diagram_info_(self, args: 'cirq.CircuitDiagramInfoArgs') -> List[str]:
if not len(self._qubit_pauli_map):
return NotImplemented
qs = args.known_qubits or list(self.keys())
symbols = list(str(self.get(q)) for q in qs)
if self.coefficient == 1:
prefix = '+'
elif self.coefficient == -1:
prefix = '-'
elif self.coefficient == 1j:
prefix = 'i'
elif self.coefficient == -1j:
prefix = '-i'
else:
prefix = f'({args.format_complex(self.coefficient)})*'
symbols[0] = f'PauliString({prefix}{symbols[0]})'
return symbols
def with_qubits(self, *new_qubits: 'cirq.Qid') -> 'PauliString':
return PauliString(
qubit_pauli_map=dict(zip(new_qubits, (self[q] for q in self.qubits))),
coefficient=self._coefficient,
)
def with_coefficient(self, new_coefficient: Union[int, float, complex]) -> 'PauliString':
return PauliString(qubit_pauli_map=dict(self._qubit_pauli_map), coefficient=new_coefficient)
def values(self) -> ValuesView[pauli_gates.Pauli]:
return self._qubit_pauli_map.values()
def items(self) -> ItemsView[TKey, pauli_gates.Pauli]:
return self._qubit_pauli_map.items()
def frozen(self) -> 'cirq.PauliString':
"""Returns a cirq.PauliString with the same contents."""
return self
def mutable_copy(self) -> 'cirq.MutablePauliString':
"""Returns a new cirq.MutablePauliString with the same contents."""
return MutablePauliString(
coefficient=self.coefficient,
pauli_int_dict={
q: PAULI_GATE_LIKE_TO_INDEX_MAP[p] for q, p in self._qubit_pauli_map.items()
},
)
def __iter__(self) -> Iterator[TKey]:
return iter(self._qubit_pauli_map.keys())
def __bool__(self):
return bool(self._qubit_pauli_map)
def __len__(self) -> int:
return len(self._qubit_pauli_map)
def _repr_pretty_(self, p: Any, cycle: bool) -> None:
"""Print ASCII diagram in Jupyter."""
if cycle:
# There should never be a cycle. This is just in case.
p.text('cirq.PauliString(...)')
else:
p.text(str(self))
def __repr__(self) -> str:
ordered_qubits = sorted(self.qubits)
prefix = ''
factors = []
if self._coefficient == -1:
prefix = '-'
elif self._coefficient != 1:
factors.append(repr(self._coefficient))
if not ordered_qubits:
factors.append('cirq.PauliString()')
for q in ordered_qubits:
factors.append(repr(cast(raw_types.Gate, self[q]).on(q)))
fused = prefix + '*'.join(factors)
if len(factors) > 1:
return f'({fused})'
return fused
def __str__(self) -> str:
ordered_qubits = sorted(self.qubits)
prefix = ''
factors = []
if self._coefficient == -1:
prefix = '-'
elif self._coefficient != 1:
factors.append(repr(self._coefficient))
if not ordered_qubits:
factors.append('I')
for q in ordered_qubits:
factors.append(str(cast(raw_types.Gate, self[q]).on(q)))
return prefix + '*'.join(factors)
def matrix(self, qubits: Optional[Iterable[TKey]] = None) -> np.ndarray:
"""Returns the matrix of self in computational basis of qubits.
Args:
qubits: Ordered collection of qubits that determine the subspace
in which the matrix representation of the Pauli string is to
be computed. Qubits absent from self.qubits are acted on by
the identity. Defaults to self.qubits.
"""
qubits = self.qubits if qubits is None else qubits
factors = [self.get(q, default=identity.I) for q in qubits]
return linalg.kron(self.coefficient, *[protocols.unitary(f) for f in factors])
def _has_unitary_(self) -> bool:
return abs(1 - abs(self.coefficient)) < 1e-6
def _unitary_(self) -> Optional[np.ndarray]:
if not self._has_unitary_():
return None
return self.matrix()
def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs'):
if not self._has_unitary_():
return None
if self.coefficient != 1:
args.target_tensor *= self.coefficient
return protocols.apply_unitaries([self[q].on(q) for q in self.qubits], self.qubits, args)
def expectation_from_state_vector(
self,
state_vector: np.ndarray,
qubit_map: Mapping[TKey, int],
*,
atol: float = 1e-7,
check_preconditions: bool = True,
) -> float:
r"""Evaluate the expectation of this PauliString given a state vector.
Compute the expectation value of this PauliString with respect to a
state vector. By convention expectation values are defined for Hermitian
operators, and so this method will fail if this PauliString is
non-Hermitian.
`state` must be an array representation of a state vector and have
shape `(2 ** n, )` or `(2, 2, ..., 2)` (n entries) where `state` is
expressed over n qubits.
`qubit_map` must assign an integer index to each qubit in this
PauliString that determines which bit position of a computational basis
state that qubit corresponds to. For example if `state` represents
$|0\rangle |+\rangle$ and `q0, q1 = cirq.LineQubit.range(2)` then:
cirq.X(q0).expectation(state, qubit_map={q0: 0, q1: 1}) = 0
cirq.X(q0).expectation(state, qubit_map={q0: 1, q1: 0}) = 1
Args:
state_vector: An array representing a valid state vector.
qubit_map: A map from all qubits used in this PauliString to the
indices of the qubits that `state_vector` is defined over.
atol: Absolute numerical tolerance.
check_preconditions: Whether to check that `state_vector` represents
a valid state vector.
Returns:
The expectation value of the input state.
Raises:
NotImplementedError: If this PauliString is non-Hermitian.
TypeError: If the input state is not complex.
ValueError: If the input state does not have the correct shape.
"""
if abs(self.coefficient.imag) > 0.0001:
raise NotImplementedError(
'Cannot compute expectation value of a non-Hermitian '
f'PauliString <{self}>. Coefficient must be real.'
)
# FIXME: Avoid enforcing a specific complex type. This is necessary to
# prevent an `apply_unitary` bug (Issue #2041).
if state_vector.dtype.kind != 'c':
raise TypeError("Input state dtype must be np.complex64 or np.complex128")
size = state_vector.size
num_qubits = size.bit_length() - 1
if len(state_vector.shape) != 1 and state_vector.shape != (2,) * num_qubits:
raise ValueError(
"Input array does not represent a state vector "
"with shape `(2 ** n,)` or `(2, ..., 2)`."
)
_validate_qubit_mapping(qubit_map, self.qubits, num_qubits)
if check_preconditions:
qis.validate_normalized_state_vector(
state_vector=state_vector,
qid_shape=(2,) * num_qubits,
dtype=state_vector.dtype,
atol=atol,
)
return self._expectation_from_state_vector_no_validation(state_vector, qubit_map)
def _expectation_from_state_vector_no_validation(
self, state_vector: np.ndarray, qubit_map: Mapping[TKey, int]
) -> float:
"""Evaluate the expectation of this PauliString given a state vector.
This method does not provide input validation. See
`PauliString.expectation_from_state_vector` for function description.
Args:
state_vector: An array representing a valid state vector.
qubit_map: A map from all qubits used in this PauliString to the
indices of the qubits that `state` is defined over.
Returns:
The expectation value of the input state.
"""
if len(state_vector.shape) == 1:
num_qubits = state_vector.shape[0].bit_length() - 1
state_vector = np.reshape(state_vector, (2,) * num_qubits)
ket = np.copy(state_vector)
for qubit, pauli in self.items():
buffer = np.empty(ket.shape, dtype=state_vector.dtype)
args = protocols.ApplyUnitaryArgs(
target_tensor=ket, available_buffer=buffer, axes=(qubit_map[qubit],)
)
ket = protocols.apply_unitary(pauli, args)
return self.coefficient * (
np.tensordot(state_vector.conj(), ket, axes=len(ket.shape)).item()
)
def expectation_from_density_matrix(
self,
state: np.ndarray,
qubit_map: Mapping[TKey, int],
*,
atol: float = 1e-7,
check_preconditions: bool = True,
) -> float:
r"""Evaluate the expectation of this PauliString given a density matrix.
Compute the expectation value of this PauliString with respect to an
array representing a density matrix. By convention expectation values
are defined for Hermitian operators, and so this method will fail if
this PauliString is non-Hermitian.
`state` must be an array representation of a density matrix and have
shape `(2 ** n, 2 ** n)` or `(2, 2, ..., 2)` (2*n entries), where
`state` is expressed over n qubits.
`qubit_map` must assign an integer index to each qubit in this
PauliString that determines which bit position of a computational basis
state that qubit corresponds to. For example if `state` represents
$|0\rangle |+\rangle$ and `q0, q1 = cirq.LineQubit.range(2)` then:
cirq.X(q0).expectation(state, qubit_map={q0: 0, q1: 1}) = 0
cirq.X(q0).expectation(state, qubit_map={q0: 1, q1: 0}) = 1
Args:
state: An array representing a valid density matrix.
qubit_map: A map from all qubits used in this PauliString to the
indices of the qubits that `state` is defined over.
atol: Absolute numerical tolerance.
check_preconditions: Whether to check that `state` represents a
valid density matrix.
Returns:
The expectation value of the input state.
Raises:
NotImplementedError: If this PauliString is non-Hermitian.
TypeError: If the input state is not complex.
ValueError: If the input state does not have the correct shape.
"""
if abs(self.coefficient.imag) > 0.0001:
raise NotImplementedError(
'Cannot compute expectation value of a non-Hermitian '
f'PauliString <{self}>. Coefficient must be real.'
)
# FIXME: Avoid enforcing specific complex type. This is necessary to
# prevent an `apply_unitary` bug (Issue #2041).
if state.dtype.kind != 'c':
raise TypeError("Input state dtype must be np.complex64 or np.complex128")
size = state.size
num_qubits = int(np.sqrt(size)).bit_length() - 1
dim = 1 << num_qubits
if state.shape != (dim, dim) and state.shape != (2, 2) * num_qubits:
raise ValueError(
"Input array does not represent a density matrix "
"with shape `(2 ** n, 2 ** n)` or `(2, ..., 2)`."
)
_validate_qubit_mapping(qubit_map, self.qubits, num_qubits)
if check_preconditions:
# Do not enforce reshaping if all axes of the state already have dimension 2.
_ = qis.to_valid_density_matrix(
density_matrix_rep=state.reshape(dim, dim),
num_qubits=num_qubits,
dtype=state.dtype,
atol=atol,
)
return self._expectation_from_density_matrix_no_validation(state, qubit_map)
def _expectation_from_density_matrix_no_validation(
self, state: np.ndarray, qubit_map: Mapping[TKey, int]
) -> float:
"""Evaluate the expectation of this PauliString given a density matrix.
This method does not provide input validation. See
`PauliString.expectation_from_density_matrix` for function description.
Args:
state: An array representing a valid density matrix.
qubit_map: A map from all qubits used in this PauliString to the
indices of the qubits that `state` is defined over.
Returns:
The expectation value of the input state.
"""
result = np.copy(state)
if len(state.shape) == 2:
num_qubits = state.shape[0].bit_length() - 1
result = np.reshape(result, (2,) * num_qubits * 2)
for qubit, pauli in self.items():
buffer = np.empty(result.shape, dtype=state.dtype)
args = protocols.ApplyUnitaryArgs(
target_tensor=result, available_buffer=buffer, axes=(qubit_map[qubit],)
)
result = protocols.apply_unitary(pauli, args)
while any(result.shape):
result = np.trace(result, axis1=0, axis2=len(result.shape) // 2)
return result * self.coefficient
def zip_items(
self, other: 'cirq.PauliString[TKey]'
) -> Iterator[Tuple[TKey, Tuple[pauli_gates.Pauli, pauli_gates.Pauli]]]:
for qubit, pauli0 in self.items():
if qubit in other:
yield qubit, (pauli0, other[qubit])
def zip_paulis(
self, other: 'cirq.PauliString'
) -> Iterator[Tuple[pauli_gates.Pauli, pauli_gates.Pauli]]:
return (paulis for qubit, paulis in self.zip_items(other))
def _commutes_(
self, other: Any, *, atol: Union[int, float] = 1e-8
) -> Union[bool, NotImplementedType, None]:
if not isinstance(other, PauliString):
return NotImplemented
return sum(not protocols.commutes(p0, p1) for p0, p1 in self.zip_paulis(other)) % 2 == 0
def __neg__(self) -> 'PauliString':
return PauliString(qubit_pauli_map=self._qubit_pauli_map, coefficient=-self._coefficient)
def __pos__(self) -> 'PauliString':
return self
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
"""Override behavior of numpy's exp method."""
if ufunc == np.exp and len(inputs) == 1 and inputs[0] is self:
return math.e ** self
return NotImplemented
def __pow__(self, power):
if power == 1:
return self
if power == -1:
return PauliString(
qubit_pauli_map=self._qubit_pauli_map, coefficient=self.coefficient ** -1
)
if isinstance(power, (int, float)):
r, i = cmath.polar(self.coefficient)
if abs(r - 1) > 0.0001:
# Raising non-unitary PauliStrings to a power is not supported.
return NotImplemented
if len(self) == 1:
q, p = next(iter(self.items()))
gates = {
pauli_gates.X: common_gates.XPowGate,
pauli_gates.Y: common_gates.YPowGate,
pauli_gates.Z: common_gates.ZPowGate,
}
return gates[p](exponent=power).on(q)
global_half_turns = power * (i / math.pi)
# HACK: Avoid circular dependency.
from cirq.ops import pauli_string_phasor
return pauli_string_phasor.PauliStringPhasor(
PauliString(qubit_pauli_map=self._qubit_pauli_map),
exponent_neg=global_half_turns + power,
exponent_pos=global_half_turns,
)
return NotImplemented
def __rpow__(self, base):
if isinstance(base, (int, float)) and base > 0:
if abs(self.coefficient.real) > 0.0001:
raise NotImplementedError(
'Exponentiated to a non-Hermitian PauliString '
f'<{base}**{self}>. Coefficient must be imaginary.'
)
half_turns = 2 * math.log(base) * (-self.coefficient.imag / math.pi)
if len(self) == 1:
q, p = next(iter(self.items()))
gates = {
pauli_gates.X: common_gates.XPowGate,
pauli_gates.Y: common_gates.YPowGate,
pauli_gates.Z: common_gates.ZPowGate,
}
return gates[p](exponent=half_turns, global_shift=-0.5).on(q)
# HACK: Avoid circular dependency.
from cirq.ops import pauli_string_phasor
return pauli_string_phasor.PauliStringPhasor(
PauliString(qubit_pauli_map=self._qubit_pauli_map),
exponent_neg=+half_turns / 2,
exponent_pos=-half_turns / 2,
)
return NotImplemented
def map_qubits(self, qubit_map: Dict[TKey, TKeyNew]) -> 'cirq.PauliString[TKeyNew]':
new_qubit_pauli_map = {qubit_map[qubit]: pauli for qubit, pauli in self.items()}
return PauliString(qubit_pauli_map=new_qubit_pauli_map, coefficient=self._coefficient)
def to_z_basis_ops(self) -> Iterator[raw_types.Operation]:
"""Returns operations to convert the qubits to the computational basis."""
for qubit, pauli in self.items():
yield clifford_gate.SingleQubitCliffordGate.from_single_map(
{pauli: (pauli_gates.Z, False)}
)(qubit)
def dense(self, qubits: Sequence[TKey]) -> 'cirq.DensePauliString':
"""Returns a `cirq.DensePauliString` version of this Pauli string.
This method satisfies the invariant `P.dense(qubits).on(*qubits) == P`.
Args:
qubits: The implicit sequence of qubits used by the dense pauli
string. Specifically, if the returned dense Pauli string was
applied to these qubits (via its `on` method) then the result
would be a Pauli string equivalent to the receiving Pauli
string.
Returns:
A `cirq.DensePauliString` instance `D` such that `D.on(*qubits)`
equals the receiving `cirq.PauliString` instance `P`.
Raises:
ValueError: If the number of qubits is too small.
"""
from cirq.ops.dense_pauli_string import DensePauliString
if not self.keys() <= set(qubits):
raise ValueError('not self.keys() <= set(qubits)')
# pylint: disable=too-many-function-args
pauli_mask = [self.get(q, identity.I) for q in qubits]
# pylint: enable=too-many-function-args
return DensePauliString(pauli_mask, coefficient=self.coefficient)
def conjugated_by(self, clifford: 'cirq.OP_TREE') -> 'PauliString':
r"""Returns the Pauli string conjugated by a clifford operation.
The product-of-Paulis $P$ conjugated by the Clifford operation $C$ is
$$
C^\dagger P C
$$
For example, conjugating a +Y operation by an S operation results in a
+X operation (as opposed to a -X operation).
In a circuit diagram where `P` is a pauli string observable immediately
after a Clifford operation `C`, the pauli string `P.conjugated_by(C)` is
the equivalent pauli string observable just before `C`.
--------------------------C---P---
= ---C---P------------------------
= ---C---P---------C^-1---C-------
= ---C---P---C^-1---------C-------
= --(C^-1 · P · C)--------C-------
= ---P.conjugated_by(C)---C-------
Analogously, a Pauli product P can be moved from before a Clifford C in
a circuit diagram to after the Clifford C by conjugating P by the
inverse of C:
---P---C---------------------------
= -----C---P.conjugated_by(C^-1)---
Args:
clifford: The Clifford operation to conjugate by. This can be an
individual operation, or a tree of operations.
Note that the composite Clifford operation defined by a sequence
of operations is equivalent to a circuit containing those
operations in the given order. Somewhat counter-intuitively,
this means that the operations in the sequence are conjugated
onto the Pauli string in reverse order. For example,
`P.conjugated_by([C1, C2])` is equivalent to
`P.conjugated_by(C2).conjugated_by(C1)`.
Examples:
>>> a, b = cirq.LineQubit.range(2)
>>> print(cirq.X(a).conjugated_by(cirq.CZ(a, b)))
X(0)*Z(1)
>>> print(cirq.X(a).conjugated_by(cirq.S(a)))
-Y(0)
>>> print(cirq.X(a).conjugated_by([cirq.H(a), cirq.CNOT(a, b)]))
Z(0)*X(1)
Returns:
The Pauli string conjugated by the given Clifford operation.
"""
pauli_map = dict(self._qubit_pauli_map)
should_negate = False
for op in list(op_tree.flatten_to_ops(clifford))[::-1]:
if pauli_map.keys().isdisjoint(set(op.qubits)):
continue
for clifford_op in _decompose_into_cliffords(op)[::-1]:
if pauli_map.keys().isdisjoint(set(clifford_op.qubits)):
continue
should_negate ^= _pass_operation_over(pauli_map, clifford_op, False)
coef = -self._coefficient if should_negate else self.coefficient
return PauliString(qubit_pauli_map=pauli_map, coefficient=coef)
def after(self, ops: 'cirq.OP_TREE') -> 'cirq.PauliString':
r"""Determines the equivalent pauli string after some operations.
If the PauliString is $P$ and the Clifford operation is $C$, then the
result is $C P C^\dagger$.
Args:
ops: A stabilizer operation or nested collection of stabilizer
operations.
Returns:
The result of propagating this pauli string from before to after the
given operations.
"""
return self.conjugated_by(protocols.inverse(ops))
def before(self, ops: 'cirq.OP_TREE') -> 'cirq.PauliString':
r"""Determines the equivalent pauli string before some operations.
If the PauliString is $P$ and the Clifford operation is $C$, then the
result is $C^\dagger P C$.
Args:
ops: A stabilizer operation or nested collection of stabilizer
operations.
Returns:
The result of propagating this pauli string from after to before the
given operations.
"""
return self.conjugated_by(ops)
def pass_operations_over(
self, ops: Iterable['cirq.Operation'], after_to_before: bool = False
) -> 'PauliString':
"""Determines how the Pauli string changes when conjugated by Cliffords.
The output and input pauli strings are related by a circuit equivalence.
In particular, this circuit:
───ops───INPUT_PAULI_STRING───
will be equivalent to this circuit:
───OUTPUT_PAULI_STRING───ops───
up to global phase (assuming `after_to_before` is not set).
If ops together have matrix C, the Pauli string has matrix P, and the
output Pauli string has matrix P', then P' == C^-1 P C up to
global phase.
Setting `after_to_before` inverts the relationship, so that the output
is the input and the input is the output. Equivalently, it inverts C.
Args:
ops: The operations to move over the string.
after_to_before: Determines whether the operations start after the
pauli string, instead of before (and so are moving in the
opposite direction).
"""
pauli_map = dict(self._qubit_pauli_map)
should_negate = False
for op in ops:
if pauli_map.keys().isdisjoint(set(op.qubits)):
continue
decomposed = _decompose_into_cliffords(op)
if not after_to_before:
decomposed = decomposed[::-1]
for clifford_op in decomposed:
if pauli_map.keys().isdisjoint(set(clifford_op.qubits)):
continue
should_negate ^= _pass_operation_over(pauli_map, clifford_op, after_to_before)
coef = -self._coefficient if should_negate else self.coefficient
return PauliString(qubit_pauli_map=pauli_map, coefficient=coef)
def _validate_qubit_mapping(
qubit_map: Mapping[TKey, int], pauli_qubits: Tuple[TKey, ...], num_state_qubits: int
) -> None:
"""Validates that a qubit map is a valid mapping.
This will enforce that all elements of `pauli_qubits` appear in `qubit_map`,
and that the integers in `qubit_map` correspond to valid positions in a
representation of a state over `num_state_qubits`.
Args:
qubit_map: A map from qubits to integers.
pauli_qubits: The qubits that must be contained in `qubit_map`.
num_state_qubits: The number of qubits over which a state is expressed.
Raises:
TypeError: If the qubit map is between the wrong types.
ValueError: If the qubit maps is not complete or does not match with
`num_state_qubits`.
"""
if not isinstance(qubit_map, Mapping) or not all(
isinstance(k, raw_types.Qid) and isinstance(v, int) for k, v in qubit_map.items()
):
raise TypeError(
"Input qubit map must be a valid mapping from Qubit ID's to integer indices."
)
if not set(qubit_map.keys()) >= set(pauli_qubits):
raise ValueError(
"Input qubit map must be a complete mapping over all of this PauliString's qubits."
)
used_inds = [qubit_map[q] for q in pauli_qubits]
if len(used_inds) != len(set(used_inds)) or not set(range(num_state_qubits)) >= set(
sorted(used_inds)
):
raise ValueError(
f'Input qubit map indices must be valid for a state over {num_state_qubits} qubits.'
)
# Ignoring type because mypy believes `with_qubits` methods are incompatible.
class SingleQubitPauliStringGateOperation( # type: ignore
gate_operation.GateOperation, PauliString
):
"""A Pauli operation applied to a qubit.
Satisfies the contract of both GateOperation and PauliString. Relies
implicitly on the fact that PauliString({q: X}) compares as equal to
GateOperation(X, [q]).
"""
def __init__(self, pauli: pauli_gates.Pauli, qubit: 'cirq.Qid'):
PauliString.__init__(self, qubit_pauli_map={qubit: pauli})
gate_operation.GateOperation.__init__(self, cast(raw_types.Gate, pauli), [qubit])
def with_qubits(self, *new_qubits: 'cirq.Qid') -> 'SingleQubitPauliStringGateOperation':
if len(new_qubits) != 1:
raise ValueError("len(new_qubits) != 1")
return SingleQubitPauliStringGateOperation(
cast(pauli_gates.Pauli, self.gate), new_qubits[0]
)
@property
def pauli(self) -> pauli_gates.Pauli:
return cast(pauli_gates.Pauli, self.gate)
@property
def qubit(self) -> raw_types.Qid:
assert len(self.qubits) == 1
return self.qubits[0]
def _as_pauli_string(self) -> PauliString:
return PauliString(qubit_pauli_map={self.qubit: self.pauli})
def __mul__(self, other):
if isinstance(other, SingleQubitPauliStringGateOperation):
return self._as_pauli_string() * other._as_pauli_string()
if isinstance(other, (PauliString, complex, float, int)):
return self._as_pauli_string() * other
return NotImplemented
def __rmul__(self, other):
if isinstance(other, (PauliString, complex, float, int)):
return other * self._as_pauli_string()
return NotImplemented
def __neg__(self):
return -self._as_pauli_string()
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, ['pauli', 'qubit'])
@classmethod
def _from_json_dict_( # type: ignore
cls, pauli: pauli_gates.Pauli, qubit: 'cirq.Qid', **kwargs
):
# Note, this method is required or else superclasses' deserialization
# would be used
return cls(pauli=pauli, qubit=qubit)
@value.value_equality(unhashable=True, manual_cls=True, approximate=True)
class MutablePauliString(Generic[TKey]):
def __init__(
self,
*contents: 'cirq.PAULI_STRING_LIKE',
coefficient: Union[int, float, complex] = 1,
pauli_int_dict: Optional[Dict[TKey, int]] = None,
):
self.coefficient = complex(coefficient)
self.pauli_int_dict: Dict[TKey, int] = {} if pauli_int_dict is None else pauli_int_dict
if contents:
self.inplace_left_multiply_by(contents)
def _value_equality_values_(self):
return self.frozen()._value_equality_values_()
def _value_equality_values_cls_(self):
return self.frozen()._value_equality_values_cls_()
def _imul_atom_helper(self, key: TKey, pauli_lhs: int, sign: int) -> int:
pauli_old = self.pauli_int_dict.pop(key, 0)
pauli_new = pauli_lhs ^ pauli_old
if pauli_new:
self.pauli_int_dict[key] = pauli_new
if not pauli_lhs or not pauli_old or pauli_lhs == pauli_old:
return 0
if (pauli_old - pauli_lhs) % 3 == 1:
return sign
return -sign
def keys(self) -> AbstractSet[TKey]:
return self.pauli_int_dict.keys()
def values(self) -> Iterator[Union['cirq.Pauli', 'cirq.IdentityGate']]:
for v in self.pauli_int_dict.values():
yield _INT_TO_PAULI[v]
def __iter__(self) -> Iterator[TKey]:
return iter(self.pauli_int_dict)
def __len__(self) -> int:
return len(self.pauli_int_dict)
def __bool__(self) -> bool:
return bool(self.pauli_int_dict)
def frozen(self) -> 'cirq.PauliString':
"""Returns a cirq.PauliString with the same contents.
For example, this is useful because cirq.PauliString is an operation
whereas cirq.MutablePauliString is not.
"""
return PauliString(
coefficient=self.coefficient,
qubit_pauli_map={
q: cast(pauli_gates.Pauli, _INT_TO_PAULI[p])
for q, p in self.pauli_int_dict.items()
if p
},
)
def mutable_copy(self) -> 'cirq.MutablePauliString':
"""Returns a new cirq.MutablePauliString with the same contents."""
return MutablePauliString(
coefficient=self.coefficient,
pauli_int_dict=dict(self.pauli_int_dict),
)
def items(self) -> Iterator[Tuple[TKey, Union['cirq.Pauli', 'cirq.IdentityGate']]]:
for k, v in self.pauli_int_dict.items():
yield k, _INT_TO_PAULI[v]
def __contains__(self, item: Any) -> bool:
return item in self.pauli_int_dict
def __getitem__(self, item: Any) -> Union['cirq.Pauli', 'cirq.IdentityGate']:
return _INT_TO_PAULI[self.pauli_int_dict[item]]
def __setitem__(self, key: TKey, value: 'cirq.PAULI_GATE_LIKE'):
value = _pauli_like_to_pauli_int(key, value)
if value:
self.pauli_int_dict[key] = _pauli_like_to_pauli_int(key, value)
else:
self.pauli_int_dict.pop(key, None)
def __delitem__(self, key: TKey):
del self.pauli_int_dict[key]
# pylint: disable=function-redefined
@overload
def get(
self, key: TKey, default: None = None
) -> Union['cirq.Pauli', 'cirq.IdentityGate', None]:
pass
@overload
def get(
self, key: TKey, default: TDefault
) -> Union['cirq.Pauli', 'cirq.IdentityGate', TDefault]:
pass
def get(self, key: TKey, default=None):
result = self.pauli_int_dict.get(key, None)
return default if result is None else _INT_TO_PAULI[result]
# pylint: enable=function-redefined
def inplace_before(self, ops: 'cirq.OP_TREE') -> 'cirq.MutablePauliString':
r"""Propagates the pauli string from after to before a Clifford effect.
If the old value of the MutablePauliString is $P$ and the Clifford
operation is $C$, then the new value of the MutablePauliString is
$C^\dagger P C$.
Args:
ops: A stabilizer operation or nested collection of stabilizer
operations.
Returns:
The mutable pauli string that was mutated.
"""
return self.inplace_after(protocols.inverse(ops))
def inplace_after(self, ops: 'cirq.OP_TREE') -> 'cirq.MutablePauliString':
r"""Propagates the pauli string from before to after a Clifford effect.
If the old value of the MutablePauliString is $P$ and the Clifford
operation is $C$, then the new value of the MutablePauliString is
$C P C^\dagger$.
Args:
ops: A stabilizer operation or nested collection of stabilizer
operations.
Returns:
The mutable pauli string that was mutated.
Raises:
NotImplementedError: If any ops decompose into an unsupported
Clifford gate.
"""
for clifford in op_tree.flatten_to_ops(ops):
for op in _decompose_into_cliffords(clifford):
ps = [self.pauli_int_dict.pop(cast(TKey, q), 0) for q in op.qubits]
if not any(ps):
continue
gate = op.gate
if isinstance(gate, clifford_gate.SingleQubitCliffordGate):
out = gate.transform(cast(pauli_gates.Pauli, _INT_TO_PAULI[ps[0]]))
if out.flip:
self.coefficient *= -1
self.pauli_int_dict[cast(TKey, op.qubits[0])] = PAULI_GATE_LIKE_TO_INDEX_MAP[
out.to
]
elif isinstance(gate, pauli_interaction_gate.PauliInteractionGate):
q0, q1 = op.qubits
p0 = _INT_TO_PAULI[ps[0]]
p1 = _INT_TO_PAULI[ps[1]]
# Kick across Paulis that anti-commute with the controls.
kickback_0_to_1 = not protocols.commutes(p0, gate.pauli0)
kickback_1_to_0 = not protocols.commutes(p1, gate.pauli1)
kick0 = gate.pauli1 if kickback_0_to_1 else identity.I
kick1 = gate.pauli0 if kickback_1_to_0 else identity.I
self.__imul__({q0: p0, q1: kick0})
self.__imul__({q0: kick1, q1: p1})
# Decompose inverted controls into single-qubit operations.
if gate.invert0:
self.inplace_after(gate.pauli1(q1))
if gate.invert1:
self.inplace_after(gate.pauli0(q0))
else:
# coverage: ignore
raise NotImplementedError(f"Unrecognized decomposed Clifford: {op!r}")
return self
def _imul_helper(self, other: 'cirq.PAULI_STRING_LIKE', sign: int):
"""Left-multiplies or right-multiplies by a PAULI_STRING_LIKE.
Args:
other: What to multiply by.
sign: +1 to left-multiply, -1 to right-multiply.
Returns:
self on success, NotImplemented given an unknown type of value.
"""
if isinstance(other, (Mapping, PauliString, MutablePauliString)):
if isinstance(other, (PauliString, MutablePauliString)):
self.coefficient *= other.coefficient
phase_log_i = 0
for qubit, pauli_gate_like in other.items():
pauli_int = _pauli_like_to_pauli_int(qubit, pauli_gate_like)
phase_log_i += self._imul_atom_helper(cast(TKey, qubit), pauli_int, sign)
self.coefficient *= 1j ** (phase_log_i & 3)
elif isinstance(other, numbers.Number):
self.coefficient *= complex(cast(SupportsComplex, other))
elif isinstance(other, raw_types.Operation) and isinstance(
other.gate, identity.IdentityGate
):
pass
elif (
isinstance(other, Iterable)
and not isinstance(other, str)
and not isinstance(other, linear_combinations.PauliSum)
):
if sign == +1:
other = reversed(list(other))
for item in other:
if self._imul_helper(cast(PAULI_STRING_LIKE, item), sign) is NotImplemented:
return NotImplemented
else:
return NotImplemented
return self
def _imul_helper_checkpoint(self, other: 'cirq.PAULI_STRING_LIKE', sign: int):
"""Like `_imul_helper` but guarantees no-op on error."""
if not isinstance(other, (numbers.Number, PauliString, MutablePauliString)):
other = MutablePauliString()._imul_helper(other, sign)
if other is NotImplemented:
return NotImplemented
return self._imul_helper(other, sign)
def inplace_left_multiply_by(
self, other: 'cirq.PAULI_STRING_LIKE'
) -> 'cirq.MutablePauliString':
"""Left-multiplies a pauli string into this pauli string.
Args:
other: A pauli string or `cirq.PAULI_STRING_LIKE` to left-multiply
into `self`.
Returns:
The `self` mutable pauli string that was mutated.
Raises:
TypeError: `other` was not a `cirq.PAULI_STRING_LIKE`. `self`
was not mutated.
"""
if self._imul_helper_checkpoint(other, -1) is NotImplemented:
raise TypeError(f"{other!r} is not cirq.PAULI_STRING_LIKE.")
return self
def _json_dict_(self) -> Dict[str, Any]:
return {
# JSON requires mappings to have string keys.
'pauli_int_dict': list(self.pauli_int_dict.items()),
'coefficient': self.coefficient,
}
@classmethod
def _from_json_dict_(cls, pauli_int_dict, coefficient, **kwargs):
return cls(pauli_int_dict=dict(pauli_int_dict), coefficient=coefficient)
def inplace_right_multiply_by(
self, other: 'cirq.PAULI_STRING_LIKE'
) -> 'cirq.MutablePauliString':
"""Right-multiplies a pauli string into this pauli string.
Args:
other: A pauli string or `cirq.PAULI_STRING_LIKE` to right-multiply
into `self`.
Returns:
The `self` mutable pauli string that was mutated.
Raises:
TypeError: `other` was not a `cirq.PAULI_STRING_LIKE`. `self`
was not mutated.
"""
if self._imul_helper_checkpoint(other, +1) is NotImplemented:
raise TypeError(f"{other!r} is not cirq.PAULI_STRING_LIKE.")
return self
def __neg__(self) -> 'cirq.MutablePauliString':
result = self.mutable_copy()
result.coefficient *= -1
return result
def __pos__(self) -> 'cirq.MutablePauliString':
return self.mutable_copy()
def transform_qubits(
self, func: Callable[[TKey], TKeyNew], *, inplace: bool = False
) -> 'cirq.MutablePauliString[TKeyNew]':
"""Returns a mutable pauli string with transformed qubits.
Args:
func: The qubit transformation to apply.
inplace: If false (the default), creates a new mutable pauli string
to store the result. If true, overwrites this mutable pauli
string's contents. Defaults to false for consistency with
`cirq.PauliString.transform_qubits` in situations where the
pauli string being used may or may not be mutable.
Returns:
A transformed MutablePauliString.
If inplace=True, returns `self`.
If inplace=False, returns a new instance.
"""
new_dict = {func(q): p for q, p in self.pauli_int_dict.items()}
if not inplace:
return MutablePauliString(
coefficient=self.coefficient,
pauli_int_dict=new_dict,
)
result = cast('cirq.MutablePauliString[TKeyNew]', self)
result.pauli_int_dict = new_dict
return result
def __imul__(self, other: 'cirq.PAULI_STRING_LIKE') -> 'cirq.MutablePauliString':
"""Left-multiplies a pauli string into this pauli string.
Args:
other: A pauli string or `cirq.PAULI_STRING_LIKE` to left-multiply
into `self`.
Returns:
The `self` mutable pauli string that was successfully mutated.
If `other` is not a `cirq.PAULI_STRING_LIKE`, `self` is not mutated
and `NotImplemented` is returned.
"""
return self._imul_helper_checkpoint(other, +1)
def __mul__(self, other: 'cirq.PAULI_STRING_LIKE') -> 'cirq.PauliString':
"""Multiplies two pauli-string-likes together.
The result is not mutable.
"""
return self.frozen() * other
def __rmul__(self, other: 'cirq.PAULI_STRING_LIKE') -> 'cirq.PauliString':
"""Multiplies two pauli-string-likes together.
The result is not mutable.
"""
return other * self.frozen()
def __str__(self) -> str:
return f'mutable {self.frozen()}'
def __repr__(self) -> str:
return f'{self.frozen()!r}.mutable_copy()'
def _decompose_into_cliffords(op: 'cirq.Operation') -> List['cirq.Operation']:
# An operation that can be ignored?
if isinstance(op.gate, global_phase_op.GlobalPhaseGate):
return []
# Already a known Clifford?
if isinstance(
op.gate,
(clifford_gate.SingleQubitCliffordGate, pauli_interaction_gate.PauliInteractionGate),
):
return [op]
# Specifies a decomposition into Cliffords?
v = getattr(op, '_decompose_into_clifford_', None)
if v is not None:
result = v()
if result is not None and result is not NotImplemented:
return list(op_tree.flatten_to_ops(result))
# Specifies a decomposition that happens to contain only Cliffords?
decomposed = protocols.decompose_once(op, None)
if decomposed is not None:
return [out for sub_op in decomposed for out in _decompose_into_cliffords(sub_op)]
raise TypeError(
f'Operation is not a known Clifford and did not decompose into known Cliffords: {op!r}'
)
def _pass_operation_over(
pauli_map: Dict[TKey, pauli_gates.Pauli],
op: 'cirq.Operation',
after_to_before: bool = False,
) -> bool:
if isinstance(op, gate_operation.GateOperation):
gate = op.gate
if isinstance(gate, clifford_gate.SingleQubitCliffordGate):
return _pass_single_clifford_gate_over(
pauli_map, gate, cast(TKey, op.qubits[0]), after_to_before=after_to_before
)
if isinstance(gate, pauli_interaction_gate.PauliInteractionGate):
return _pass_pauli_interaction_gate_over(
pauli_map,
gate,
cast(TKey, op.qubits[0]),
cast(TKey, op.qubits[1]),
after_to_before=after_to_before,
)
raise NotImplementedError(f'Unsupported operation: {op!r}')
def _pass_single_clifford_gate_over(
pauli_map: Dict[TKey, pauli_gates.Pauli],
gate: clifford_gate.SingleQubitCliffordGate,
qubit: TKey,
after_to_before: bool = False,
) -> bool:
if qubit not in pauli_map:
return False # coverage: ignore
if not after_to_before:
gate **= -1
pauli, inv = gate.transform(pauli_map[qubit])
pauli_map[qubit] = pauli
return inv
def _pass_pauli_interaction_gate_over(
pauli_map: Dict[TKey, pauli_gates.Pauli],
gate: pauli_interaction_gate.PauliInteractionGate,
qubit0: TKey,
qubit1: TKey,
after_to_before: bool = False,
) -> bool:
def merge_and_kickback(
qubit: TKey,
pauli_left: Optional[pauli_gates.Pauli],
pauli_right: Optional[pauli_gates.Pauli],
inv: bool,
) -> int:
assert pauli_left is not None or pauli_right is not None
if pauli_left is None or pauli_right is None:
pauli_map[qubit] = cast(pauli_gates.Pauli, pauli_left or pauli_right)
return 0
if pauli_left == pauli_right:
del pauli_map[qubit]
return 0
pauli_map[qubit] = pauli_left.third(pauli_right)
if (pauli_left < pauli_right) ^ after_to_before:
return int(inv) * 2 + 1
return int(inv) * 2 - 1
quarter_kickback = 0
if qubit0 in pauli_map and not protocols.commutes(pauli_map[qubit0], gate.pauli0):
quarter_kickback += merge_and_kickback(
qubit1, gate.pauli1, pauli_map.get(qubit1), gate.invert1
)
if qubit1 in pauli_map and not protocols.commutes(pauli_map[qubit1], gate.pauli1):
quarter_kickback += merge_and_kickback(
qubit0, pauli_map.get(qubit0), gate.pauli0, gate.invert0
)
assert (
quarter_kickback % 2 == 0
), 'Impossible condition. quarter_kickback is either incremented twice or never.'
return quarter_kickback % 4 == 2
# Mypy has extreme difficulty with these constants for some reason.
_i = cast(identity.IdentityGate, identity.I) # type: ignore
_x = cast(pauli_gates.Pauli, pauli_gates.X) # type: ignore
_y = cast(pauli_gates.Pauli, pauli_gates.Y) # type: ignore
_z = cast(pauli_gates.Pauli, pauli_gates.Z) # type: ignore
PAULI_GATE_LIKE_TO_INDEX_MAP: Dict['cirq.PAULI_GATE_LIKE', int] = {
_i: 0,
_x: 1,
_y: 2,
_z: 3,
'I': 0,
'X': 1,
'Y': 2,
'Z': 3,
'i': 0,
'x': 1,
'y': 2,
'z': 3,
0: 0,
1: 1,
2: 2,
3: 3,
}
_INT_TO_PAULI: List[Union['cirq.Pauli', 'cirq.IdentityGate']] = [_i, _x, _y, _z]
PAULI_GATE_LIKE_TO_GATE_MAP: Dict[
'cirq.PAULI_GATE_LIKE', Union['cirq.Pauli', 'cirq.IdentityGate']
] = {k: _INT_TO_PAULI[v] for k, v in PAULI_GATE_LIKE_TO_INDEX_MAP.items()}
def _pauli_like_to_pauli_int(key: Any, pauli_gate_like: PAULI_GATE_LIKE):
pauli_int = PAULI_GATE_LIKE_TO_INDEX_MAP.get(pauli_gate_like, None)
if pauli_int is None:
raise TypeError(
f'Expected {key!r}: {pauli_gate_like!r} to have a '
f'cirq.PAULI_GATE_LIKE value. '
f"But the value isn't in "
f"{list(PAULI_GATE_LIKE_TO_INDEX_MAP.values())!r}"
)
return cast(int, pauli_int)
avg_line_length: 37.134665 | max_line_length: 100 | alphanum_fraction: 0.615776
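The pauli_string.py record above is the full cirq implementation of PauliString and MutablePauliString, and its docstrings already contain worked examples (construction from PAULI_STRING_LIKE values, conjugated_by, expectation values). The sketch below simply exercises a few of those documented behaviors through the public cirq API; the expected outputs in the comments follow the docstrings in the file.

```python
# Usage sketch based on the docstrings above (public cirq API).
import numpy as np
import cirq

a, b = cirq.LineQubit.range(2)

# Construction: repeated Paulis multiply out, coefficients are folded in.
print(cirq.PauliString([cirq.X(a), cirq.X(a)]))    # I
print(cirq.PauliString(-1, cirq.X(a), cirq.Y(b)))  # -X(0)*Y(1)

# Conjugation by a Clifford (see the conjugated_by docstring).
print(cirq.X(a).conjugated_by(cirq.CZ(a, b)))      # X(0)*Z(1)

# Expectation value of X on the |+> state.
plus = np.array([1, 1], dtype=np.complex64) / np.sqrt(2)
print(cirq.X(a).expectation_from_state_vector(plus, qubit_map={a: 0}))  # ~1.0
```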
hexsha: 407f7e39dbbcc98cf2b3e80f4d9786a437cead7f | size: 2,841 | ext: py | lang: Python
max_stars / max_issues / max_forks: repo_path=scripts/scrape_course_catalog.py, repo_name=SeanFoley123/htl-lab1, head_hexsha=32eaf17c90fcfebd04276d144375dc2f92af2f60, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import re
import urllib
import pandas as pd
import requests
import yaml
from bs4 import BeautifulSoup
#%%
## Configuration
# start here:
start_url = 'http://www.olin.edu/course-listing/'
# actually start on all these pages:
# TODO a more robust implementation would look for the ?page links on the original start page
start_urls = [start_url] + [start_url + '?page=%d' % i for i in range(1, 8)]
# download and parse all the pages
pages = [BeautifulSoup(requests.get(u).text, 'lxml') for u in start_urls]
# collect all the <a href="xxx"/> targets
hrefs = {a.attrs['href'] for page in pages for a in page.find_all('a')}
# select only those href targets whose URLs look like course pages:
# /course-listing/, followed by three or four letters, an optional hyphen, three or four digits,
# an optional letter, a hyphen, and after that we don't care.
course_urls = {urllib.parse.urljoin(start_url, href) for href in hrefs if re.match(r'\/course-listing\/[a-z]{3,4}-?\d{3,4}[a-z]?-', href)}
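# For illustration (hypothetical hrefs, not taken from the catalog itself):
#   '/course-listing/engr-2320-principles-of-engineering' would match (4 letters, hyphen, 4 digits, hyphen)
#   '/course-listing/?page=3' would be skipped (no course code in the path)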
# download and parse all the course pages
course_pages = {u: BeautifulSoup(requests.get(u).text, 'lxml') for u in course_urls}
#%%
def parse_page(course_url, html):
field_names = ['course-title', 'course-credits', 'course-hours', 'recommended-requisites', 'course-contact', 'course-description']
field_elements = {field_name: html.select('.' + field_name) for field_name in field_names}
field_text = {field_name: elts[0].text.strip() if elts else None for field_name, elts in field_elements.items()}
course_number, course_name = re.match(r'(.+) - (.+)', field_text['course-title']).groups()
course_credits = re.match(r'Credits:\s*(.+)', field_text['course-credits']).group(1)
course_hours = field_text['course-hours'] and re.match(r'Hours:\s*(.+)', field_text['course-hours']).group(1)
course_contact = field_text['course-contact'] and re.match(r'For information contact:\s*(.*)', field_text['course-contact']).group(1)
course_description = ('\n'.join(e.text for e in field_elements['course-description'][0].next_siblings if not isinstance(e, str)).strip()
if field_elements['course-description'] else None)
return {
'course_url' : course_url,
'course_number' : course_number,
'course_name' : course_name,
'course_credits' : course_credits,
'course_hours' : course_hours,
'course_contact' : course_contact,
'course_description': course_description,
}
df = pd.DataFrame.from_records([parse_page(course_url, html) for course_url, html in course_pages.items()])
df['course_area'] = df.course_number.str.extract(r'^([A-Z]+)', expand=False)
df.set_index('course_number', inplace=True)
df.sort_index(inplace=True)
df.head()
#%%
with open('./data/olin-courses-16-17.csv', 'w') as f:
df.to_csv(f)
| 42.402985
| 140
| 0.690954
|
e48295c1e8cd1f4258ad7a75573006dfacfaa75c
| 543
|
py
|
Python
|
angr/procedures/libc/atoi.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | 2
|
2018-05-02T17:41:36.000Z
|
2020-05-18T02:49:16.000Z
|
angr/procedures/libc/atoi.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/libc/atoi.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | 1
|
2022-02-03T20:06:20.000Z
|
2022-02-03T20:06:20.000Z
|
import angr
from angr.sim_type import SimTypeString, SimTypeInt
import logging
l = logging.getLogger("angr.procedures.libc.atoi")
class atoi(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, s):
#pylint:disable=attribute-defined-outside-init
self.argument_types = {0: self.ty_ptr(SimTypeString())}
self.return_type = SimTypeInt(self.state.arch, True)
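        # atoi(s) is defined to behave like strtol(s, NULL, 10), so delegate to
        # strtol's symbolic parser and return only the parsed value (index 1 of its result).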
strtol = angr.SIM_PROCEDURES['libc']['strtol']
return strtol.strtol_inner(s, self.state, self.state.memory, 10, True)[1]
| 28.578947
| 81
| 0.709024
|
8b53cbf7c6a16e2f218ab11c2271022308dcca86
| 1,851
|
py
|
Python
|
src/models/losses/hierarchical.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 8
|
2021-10-12T05:39:20.000Z
|
2022-03-31T10:55:01.000Z
|
src/models/losses/hierarchical.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 1
|
2022-03-30T19:23:42.000Z
|
2022-03-30T19:23:42.000Z
|
src/models/losses/hierarchical.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 5
|
2021-11-17T07:38:28.000Z
|
2022-01-31T10:46:36.000Z
|
import numpy as np
import torch
import torch.nn as nn
from src.registry import LOSSES
__all__ = ['HierarchicalCrossEntropyLoss']
@LOSSES.register_class
class HierarchicalCrossEntropyLoss(nn.Module):
__constants__ = ['reduction']
def __init__(self, class_graph, num_leaf_classes, reduction='mean'):
super().__init__()
if isinstance(class_graph, str):
class_graph = torch.load(class_graph)
self.class_graph = class_graph
self.bce = torch.nn.BCELoss(reduction='sum')
self.reduction = reduction
self.mapping = []
for i in range(num_leaf_classes):
self.mapping.append(torch.tensor([i]))
for i, arr in sorted(class_graph.adjacency()):
if i >= num_leaf_classes:
arr = np.fromiter(arr, dtype=int)
arr = sorted(arr[arr < num_leaf_classes])
self.mapping.append(torch.tensor(arr))
def forward(self, input, target):
probs = torch.softmax(input, dim=1)
losses = []
for i, tg in enumerate(target):
loc_probs = [probs[i, self.mapping[tg]].sum()]
for j in self.class_graph.predecessors(tg.item()):
loc_probs.append(probs[i, self.mapping[j]].sum())
loc_probs = torch.clip(torch.stack(loc_probs), min=0, max=1)
loc_loss = self.bce(loc_probs, torch.ones_like(loc_probs))
losses.append(loc_loss)
loss = torch.stack(losses)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
def to(self, *args, **kwargs):
for i, item in enumerate(self.mapping):
self.mapping[i] = item.to(*args, **kwargs)
return super(HierarchicalCrossEntropyLoss, self).to(*args, **kwargs)
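# Minimal usage sketch (illustrative only; assumes a networkx-style DiGraph in which
# nodes >= num_leaf_classes are parent categories pointing at their leaf classes):
#
#     import networkx as nx
#     g = nx.DiGraph()
#     g.add_edges_from([(2, 0), (2, 1)])  # parent class 2 groups leaf classes 0 and 1
#     criterion = HierarchicalCrossEntropyLoss(class_graph=g, num_leaf_classes=2)
#     logits = torch.randn(4, 2)          # scores over the 2 leaf classes
#     target = torch.randint(0, 2, (4,))  # leaf-class targets
#     loss = criterion(logits, target)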
| 34.924528
| 76
| 0.606699
|
048c9c3147a1499288635f14d285fc26fcabf19c
| 2,221
|
py
|
Python
|
tests/models/v2/initial_async_response_test.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 5
|
2016-08-23T17:52:22.000Z
|
2019-05-16T08:45:30.000Z
|
tests/models/v2/initial_async_response_test.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 2
|
2016-11-10T05:30:21.000Z
|
2019-04-05T15:03:37.000Z
|
tests/models/v2/initial_async_response_test.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 7
|
2016-08-25T16:11:44.000Z
|
2021-02-22T05:31:25.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.v2.initial_async_response import InitialAsyncResponse
class InitialAsyncResponseTest(unittest.TestCase):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
# Try instantiating the model
def test_initial_async_response(self):
initial_async_response_obj = InitialAsyncResponse()
self.assertNotEqual(initial_async_response_obj, None)
| 58.447368
| 845
| 0.777578
|
8f0ad659d016723d47f0e08613a6787d0b81d974
| 2,387
|
py
|
Python
|
jw/gen.py
|
JoonSeongLee/dss7-coupon
|
b2d01fbdfce10bca56b0339fca7e7dff7a1feb4f
|
[
"MIT"
] | null | null | null |
jw/gen.py
|
JoonSeongLee/dss7-coupon
|
b2d01fbdfce10bca56b0339fca7e7dff7a1feb4f
|
[
"MIT"
] | null | null | null |
jw/gen.py
|
JoonSeongLee/dss7-coupon
|
b2d01fbdfce10bca56b0339fca7e7dff7a1feb4f
|
[
"MIT"
] | null | null | null |
from ml_config import *
def coupon():
return pd.read_csv('../data/coupon_list_train.csv',memory_map=True,index_col=0)
def user():
return pd.read_csv('../data/user_list.csv',memory_map=True,index_col=0)
def view(reset_index=True):
tmp = pd.read_csv('../data/coupon_visit_train.csv',memory_map=True,index_col=0)
return tmp.reset_index() if reset_index else tmp
def purchase():
return pd.read_csv('../data/coupon_detail_train.csv',memory_map=True,index_col=0)
def area():
return pd.read_csv('../data/coupon_area_train.csv',memory_map=True,index_col=0)
def pref():
return pd.read_csv('../data/prefecture_locations.csv',memory_map=True,index_col=0)
# --------------------------------------------------------------------------------------------
# version 1 for coupon_list
def coupon_pref():
return pd.merge(coupon(), pref(), left_on='ken_name', right_on='PREF_NAME')
# version 2 for coupon_list
def coupon_area_pref():
return pd.merge(coupon_pref(), area(), left_on=['COUPON_ID_hash','PREF_NAME','small_area_name'], right_on=['COUPON_ID_hash','PREF_NAME','SMALL_AREA_NAME'])
def view_purchase():
return pd.merge(view(), purchase(), left_on=['VIEW_COUPON_ID_hash','PURCHASEID_hash','USER_ID_hash'],right_on=['COUPON_ID_hash','PURCHASEID_hash','USER_ID_hash'])
# --------------------------------------------------------------------------------------------
def user_view_coupon_pref():
uv = pd.merge(user(),view())
return pd.merge(uv, coupon_pref(), left_on='VIEW_COUPON_ID_hash', right_on='COUPON_ID_hash')
def user_view_coupon_area_pref():
uv = pd.merge(user(),view())
return pd.merge(uv, coupon_area_pref(), left_on='VIEW_COUPON_ID_hash', right_on='COUPON_ID_hash')
def user_purchase_coupon_pref():
up = pd.merge(user(), purchase())
return pd.merge(up, coupon_pref(), on='COUPON_ID_hash')
def user_purchase_coupon_area_pref():
up = pd.merge(user(), purchase())
return pd.merge(up, coupon_area_pref(), on='COUPON_ID_hash')
def user_vp_coupon_pref():
uvp = pd.merge(user(),view_purchase(), on='USER_ID_hash')
return pd.merge(uvp, coupon_pref(), left_on='VIEW_COUPON_ID_hash',right_on='COUPON_ID_hash')
def user_vp_coupon_area_pref():
uvp = pd.merge(user(),view_purchase(), on='USER_ID_hash')
return pd.merge(uvp, coupon_area_pref(), left_on='VIEW_COUPON_ID_hash',right_on='COUPON_ID_hash')
| 40.457627
| 166
| 0.675744
|
55dc8f19f49e6780409fa8bc177482ccb3308f38
| 10,933
|
py
|
Python
|
scripts/irods/controller.py
|
mbwatson/irods
|
c242db4cb20155500767d76b62bcbc4d4032b6cf
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/irods/controller.py
|
mbwatson/irods
|
c242db4cb20155500767d76b62bcbc4d4032b6cf
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/irods/controller.py
|
mbwatson/irods
|
c242db4cb20155500767d76b62bcbc4d4032b6cf
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import contextlib
import copy
import glob
import itertools
import json
import logging
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import psutil
from . import six
from .configuration import IrodsConfig
from . import lib
from . import paths
from . import upgrade_configuration
from .exceptions import IrodsError, IrodsWarning
class IrodsController(object):
def __init__(self, irods_config=IrodsConfig()):
self.config = irods_config
def check_config(self):
# load the configuration to ensure it exists
_ = self.config.server_config
_ = self.config.version
def define_log_levels(self, logger):
config = self.config.server_config
# If the log levels are not defined, then add them to server config!
if 'log_level' not in config or 'server' not in config['log_level']:
logger.debug('Adding log levels to server configuration ...')
lib.update_json_file_from_dict(paths.server_config_path(), {
'log_level': {
'agent': 'info',
'agent_factory': 'info',
'api': 'info',
'authentication': 'info',
'database': 'info',
'delay_server': 'info',
'legacy': 'info',
'microservice': 'info',
'network': 'info',
'resource': 'info',
'rule_engine': 'info',
'server': 'info'
}
})
def start(self):
l = logging.getLogger(__name__)
l.debug('Calling start on IrodsController')
if upgrade_configuration.requires_upgrade(self.config):
upgrade_configuration.upgrade(self.config)
self.define_log_levels(l)
try:
self.config.validate_configuration()
except IrodsWarning:
l.warn('Warning encountered in validation:', exc_info=True)
if self.get_binary_to_pids_dict():
raise IrodsError('iRODS already running')
self.config.clear_cache()
if not os.path.exists(self.config.server_executable):
raise IrodsError(
'Configuration problem:\n'
'\tThe \'%s\' application could not be found.' % (
os.path.basename(self.config.server_executable)))
try:
(test_file_handle, test_file_name) = tempfile.mkstemp(
dir=self.config.log_directory)
os.close(test_file_handle)
os.unlink(test_file_name)
except (IOError, OSError):
six.reraise(IrodsError, IrodsError(
'Configuration problem:\n'
'The server log directory, \'%s\''
                ' is not writeable.' % (
self.config.log_directory)),
sys.exc_info()[2])
for f in ['core.re', 'core.dvm', 'core.fnm']:
path = os.path.join(self.config.config_directory, f)
if not os.path.exists(path):
shutil.copyfile(paths.get_template_filepath(path), path)
try:
irods_port = int(self.config.server_config['zone_port'])
l.debug('Attempting to bind socket %s', irods_port)
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1', irods_port))
except socket.error:
six.reraise(IrodsError,
IrodsError('Could not bind port {0}.'.format(irods_port)),
sys.exc_info()[2])
                l.debug('Socket %s bound and released successfully.', irods_port)
if self.config.is_catalog:
from . import database_interface
database_interface.server_launch_hook(self.config)
l.info('Starting iRODS server...')
lib.execute_command(
[self.config.server_executable],
cwd=self.config.server_bin_directory,
env=self.config.execution_environment)
try_count = 1
max_retries = 100
while True:
l.debug('Attempting to connect to iRODS server on port %s. Attempt #%s',
irods_port, try_count)
with contextlib.closing(socket.socket(
socket.AF_INET, socket.SOCK_STREAM)) as s:
if s.connect_ex(('127.0.0.1', irods_port)) == 0:
l.debug('Successfully connected to port %s.', irods_port)
if len(lib.get_pids_executing_binary_file(self.config.server_executable)) == 0:
raise IrodsError('iRODS port is bound, but server is not started.')
s.send(b'\x00\x00\x00\x33<MsgHeader_PI><type>HEARTBEAT</type></MsgHeader_PI>')
message = s.recv(256)
if message != b'HEARTBEAT':
raise IrodsError('iRODS port returned non-heartbeat message:\n{0}'.format(message))
break
if try_count >= max_retries:
raise IrodsError('iRODS server failed to start.')
try_count += 1
time.sleep(1)
except IrodsError as e:
l.info('Failure')
six.reraise(IrodsError, e, sys.exc_info()[2])
l.info('Success')
def irods_grid_shutdown(self, timeout=20, **kwargs):
l = logging.getLogger(__name__)
args = ['irods-grid', 'shutdown', '--hosts={0}'.format(lib.get_hostname())]
if 'IRODS_ENVIRONMENT_FILE' in self.config.execution_environment:
kwargs['env'] = copy.copy(os.environ)
kwargs['env']['IRODS_ENVIRONMENT_FILE'] = self.config.execution_environment['IRODS_ENVIRONMENT_FILE']
start_time = time.time()
lib.execute_command_timeout(args, timeout=timeout, **kwargs)
# "irods-grid shutdown" is non-blocking
while time.time() < start_time + timeout:
if self.get_binary_to_pids_dict([self.config.server_executable]):
time.sleep(0.3)
else:
break
else:
raise IrodsError(
'The iRODS server did not stop within {0} seconds of '
'receiving the "shutdown" command.'.format(timeout))
def stop(self, timeout=20):
l = logging.getLogger(__name__)
self.config.clear_cache()
l.debug('Calling stop on IrodsController')
l.info('Stopping iRODS server...')
try:
if self.get_binary_to_pids_dict([self.config.server_executable]):
try:
self.irods_grid_shutdown(timeout=timeout)
except Exception as e:
l.error('Error encountered in graceful shutdown.')
l.debug('Exception:', exc_info=True)
else:
l.warning('No iRODS servers running.')
# kill servers first to stop spawning of other processes
server_pids_dict = self.get_binary_to_pids_dict([self.config.server_executable])
if server_pids_dict:
l.warning('iRODS server processes remain after "irods-grid shutdown".')
l.warning(format_binary_to_pids_dict(server_pids_dict))
l.warning('Killing forcefully...')
for pid in server_pids_dict[self.config.server_executable]:
l.warning('Killing %s, pid %s', self.config.server_executable, pid)
try:
lib.kill_pid(pid)
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
delete_cache_files_by_pid(pid)
binary_to_pids_dict = self.get_binary_to_pids_dict()
if binary_to_pids_dict:
l.warning('iRODS child processes remain after "irods-grid shutdown".')
l.warning(format_binary_to_pids_dict(binary_to_pids_dict))
l.warning('Killing forcefully...')
for binary, pids in binary_to_pids_dict.items():
for pid in pids:
l.warning('Killing %s, pid %s', binary, pid)
try:
lib.kill_pid(pid)
except psutil.NoSuchProcess:
pass
delete_cache_files_by_pid(pid)
except IrodsError as e:
l.info('Failure')
six.reraise(IrodsError, e, sys.exc_info()[2])
l.info('Success')
def restart(self):
l = logging.getLogger(__name__)
l.debug('Calling restart on IrodsController')
self.stop()
self.start()
def status(self):
l = logging.getLogger(__name__)
l.debug('Calling status on IrodsController')
self.config.clear_cache()
binary_to_pids_dict = self.get_binary_to_pids_dict()
if not binary_to_pids_dict:
l.info('No iRODS servers running.')
else:
l.info(format_binary_to_pids_dict(binary_to_pids_dict))
return binary_to_pids_dict
def get_binary_to_pids_dict(self, binaries=None):
if binaries is None:
binaries = [
self.config.server_executable,
self.config.rule_engine_executable]
d = {}
for b in binaries:
pids = lib.get_pids_executing_binary_file(b)
if pids:
d[b] = pids
return d
def format_binary_to_pids_dict(d):
text_list = []
for binary, pids in d.items():
text_list.append('{0} :\n{1}'.format(
os.path.basename(binary),
lib.indent(*['Process {0}'.format(pid) for pid in pids])))
return '\n'.join(text_list)
def delete_cache_files_by_pid(pid):
l = logging.getLogger(__name__)
l.debug('Deleting cache files for pid %s...', pid)
ubuntu_cache = glob.glob(os.path.join(
paths.root_directory(),
'var',
'run',
'shm',
'*irods_re_cache*pid{0}_*'.format(pid)))
delete_cache_files_by_name(*ubuntu_cache)
other_linux_cache = glob.glob(os.path.join(
paths.root_directory(),
'dev',
'shm',
'*irods_re_cache*pid{0}_*'.format(pid)))
delete_cache_files_by_name(*other_linux_cache)
def delete_cache_files_by_name(*filepaths):
l = logging.getLogger(__name__)
for path in filepaths:
try:
l.debug('Deleting %s', path)
os.unlink(path)
except (IOError, OSError):
l.warning(lib.indent('Error deleting cache file: %s'), path)
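# Illustrative sketch (not part of the original controller): start() above probes the
# server with a framed heartbeat -- a 4-byte big-endian length prefix (0x33 == 51, the
# size of the XML header that follows) and the header itself -- and expects the literal
# bytes b'HEARTBEAT' back. A hypothetical standalone probe could look like this:
def _example_heartbeat_probe(host, port, timeout=5):
    """Hypothetical helper reproducing the heartbeat exchange used in start()."""
    import struct
    payload = b'<MsgHeader_PI><type>HEARTBEAT</type></MsgHeader_PI>'
    with contextlib.closing(socket.create_connection((host, port), timeout=timeout)) as s:
        s.sendall(struct.pack('>I', len(payload)) + payload)
        return s.recv(256) == b'HEARTBEAT'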
| 39.046429
| 113
| 0.566999
|
5c12afef44c982f639b868307af30835a3975140
| 800
|
py
|
Python
|
test/config/metrics_test/trial.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 3
|
2021-01-05T07:41:58.000Z
|
2021-01-11T02:08:01.000Z
|
test/config/metrics_test/trial.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 21
|
2020-11-13T19:01:01.000Z
|
2022-02-27T09:12:51.000Z
|
test/config/metrics_test/trial.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 3
|
2019-01-02T06:15:50.000Z
|
2019-01-30T14:31:20.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import time
import json
import argparse
import nni
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dict_metrics", action='store_true')
args = parser.parse_args()
if args.dict_metrics:
result_file = 'expected_metrics_dict.json'
else:
result_file = 'expected_metrics.json'
nni.get_next_parameter()
with open(result_file, 'r') as f:
m = json.load(f)
for v in m['intermediate_result']:
time.sleep(1)
print('report_intermediate_result:', v)
nni.report_intermediate_result(v)
time.sleep(1)
print('report_final_result:', m['final_result'])
nni.report_final_result(m['final_result'])
print('done')
| 26.666667
| 62
| 0.67875
|
a3b027a36285a19096345675eaf5a8945f2b7f56
| 938
|
py
|
Python
|
chainerrl/policies/mellowmax_policy.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | 18
|
2018-08-07T07:27:41.000Z
|
2018-08-20T01:51:21.000Z
|
chainerrl/policies/mellowmax_policy.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | null | null | null |
chainerrl/policies/mellowmax_policy.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | 2
|
2018-08-16T06:47:26.000Z
|
2018-08-20T01:51:22.000Z
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
from logging import getLogger
import chainer
from chainerrl import distribution
from chainerrl.policy import Policy
logger = getLogger(__name__)
class MellowmaxPolicy(chainer.Chain, Policy):
"""Mellowmax policy.
See: http://arxiv.org/abs/1612.05628
Args:
model (chainer.Link):
Link that is callable and outputs action values.
omega (float):
Parameter of the mellowmax function.
"""
def __init__(self, model, omega=1.):
self.omega = omega
super().__init__(model=model)
def __call__(self, x):
h = self.model(x)
return distribution.MellowmaxDistribution(h, omega=self.omega)
| 24.051282
| 70
| 0.715352
|
009a837286a5013b9cc3da2bb5e58d50e08c4d05
| 416
|
py
|
Python
|
app.py
|
Wylarel/Graycraft
|
8426c083875ad036d9b5efdf097643993757c8b1
|
[
"Unlicense"
] | 1
|
2020-06-25T22:10:56.000Z
|
2020-06-25T22:10:56.000Z
|
app.py
|
Wylarel/Graycraft
|
8426c083875ad036d9b5efdf097643993757c8b1
|
[
"Unlicense"
] | null | null | null |
app.py
|
Wylarel/Graycraft
|
8426c083875ad036d9b5efdf097643993757c8b1
|
[
"Unlicense"
] | null | null | null |
from PIL import Image
import os
files = []
print("Listing files . . .")
for r, d, f in os.walk('assets\\minecraft\\textures\\'):
for file in f:
if file.endswith(".png"):
files.append(os.path.join(r, file))
max = len(files)
for f in files:
print("Editing file - " + str(files.index(f)) + "/" + str(max) + " - " + f.split("\\")[-1])
img = Image.open(f).convert('LA')
img.save(f)
| 24.470588
| 95
| 0.560096
|
9b85095faba65aec92c47c52048c4e63c4066ca1
| 683
|
py
|
Python
|
app/core/migrations/0002_tag.py
|
rahulnegi20/recipie-api
|
9e99d3c6803fe87cb64eae6980e9e0817f643f5f
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
rahulnegi20/recipie-api
|
9e99d3c6803fe87cb64eae6980e9e0817f643f5f
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
rahulnegi20/recipie-api
|
9e99d3c6803fe87cb64eae6980e9e0817f643f5f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-04-23 14:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.458333
| 118
| 0.616398
|
6d447a4935e49cd3c4f7525fff6ffe5e9883656e
| 6,290
|
py
|
Python
|
youtube_dl/extractor/myvideo.py
|
LyleH/youtube-dl
|
7564b09ef5c09454908f78cb91c3bd2d6daacac5
|
[
"Unlicense"
] | 10
|
2020-05-29T03:20:03.000Z
|
2022-03-29T01:05:20.000Z
|
youtube_dl/extractor/myvideo.py
|
huyangfeng/youtobedl
|
7b0d1c28597bd38567e5b4e853f669a5a601c6e8
|
[
"Unlicense"
] | 1
|
2016-05-18T01:27:28.000Z
|
2016-05-18T05:00:36.000Z
|
PythonSamples/library/files/lib/python2.7/site-packages/youtube_dl/extractor/myvideo.py
|
jianglei12138/python2.7
|
280aa96d8cac98c03ca8c8ed71541f7ff7817055
|
[
"PSF-2.0"
] | 9
|
2020-05-29T03:21:02.000Z
|
2021-04-14T03:26:05.000Z
|
from __future__ import unicode_literals
import binascii
import base64
import hashlib
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_ord,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
sanitized_Request,
)
class MyVideoIE(InfoExtractor):
_WORKING = False
_VALID_URL = r'https?://(?:www\.)?myvideo\.de/(?:[^/]+/)?watch/(?P<id>[0-9]+)/[^?/]+.*'
IE_NAME = 'myvideo'
_TEST = {
'url': 'http://www.myvideo.de/watch/8229274/bowling_fail_or_win',
'md5': '2d2753e8130479ba2cb7e0a37002053e',
'info_dict': {
'id': '8229274',
'ext': 'flv',
'title': 'bowling-fail-or-win',
}
}
# Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
# Released into the Public Domain by Tristan Fischer on 2013-05-19
# https://github.com/rg3/youtube-dl/pull/842
def __rc4crypt(self, data, key):
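        # Plain RC4: the first loop is the key-scheduling algorithm (KSA) that builds the
        # permutation box, the second is the PRGA that XORs the keystream with the data.
        # It is used below to decrypt the hex-decoded encxml blob with an MD5-derived key.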
x = 0
box = list(range(256))
for i in list(range(256)):
x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
box[i], box[x] = box[x], box[i]
x = 0
y = 0
out = ''
for char in data:
x = (x + 1) % 256
y = (y + box[x]) % 256
box[x], box[y] = box[y], box[x]
out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
return out
def __md5(self, s):
return hashlib.md5(s).hexdigest().encode()
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
GK = (
b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
b'TnpsbA0KTVRkbU1tSTRNdz09'
)
# Get video webpage
webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
webpage = self._download_webpage(webpage_url, video_id)
mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
if mobj is not None:
self.report_extraction(video_id)
video_url = mobj.group(1) + '.flv'
video_title = self._html_search_regex('<title>([^<]+)</title>',
webpage, 'title')
return {
'id': video_id,
'url': video_url,
'title': video_title,
}
mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage)
if mobj is not None:
request = sanitized_Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
response = self._download_webpage(request, video_id,
'Downloading video info')
info = json.loads(base64.b64decode(response).decode('utf-8'))
return {
'id': video_id,
'title': info['title'],
'url': info['streaming_url'].replace('rtmpe', 'rtmpt'),
'play_path': info['filename'],
'ext': 'flv',
'thumbnail': info['thumbnail'][0]['url'],
}
# try encxml
mobj = re.search('var flashvars={(.+?)}', webpage)
if mobj is None:
raise ExtractorError('Unable to extract video')
params = {}
encxml = ''
sec = mobj.group(1)
for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
if not a == '_encxml':
params[a] = b
else:
encxml = compat_urllib_parse_unquote(b)
if not params.get('domain'):
params['domain'] = 'www.myvideo.de'
xmldata_url = '%s?%s' % (encxml, compat_urllib_parse_urlencode(params))
if 'flash_playertype=MTV' in xmldata_url:
self._downloader.report_warning('avoiding MTV player')
xmldata_url = (
'http://www.myvideo.de/dynamic/get_player_video_xml.php'
'?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
) % video_id
# get enc data
enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
enc_data_b = binascii.unhexlify(enc_data)
sk = self.__md5(
base64.b64decode(base64.b64decode(GK)) +
self.__md5(
str(video_id).encode('utf-8')
)
)
dec_data = self.__rc4crypt(enc_data_b, sk)
# extracting infos
self.report_extraction(video_id)
video_url = None
mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
if mobj:
video_url = compat_urllib_parse_unquote(mobj.group(1))
if 'myvideo2flash' in video_url:
self.report_warning(
'Rewriting URL to use unencrypted rtmp:// ...',
video_id)
video_url = video_url.replace('rtmpe://', 'rtmp://')
if not video_url:
# extract non rtmp videos
mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
if mobj is None:
raise ExtractorError('unable to extract url')
video_url = compat_urllib_parse_unquote(mobj.group(1)) + compat_urllib_parse_unquote(mobj.group(2))
video_file = self._search_regex('source=\'(.*?)\'', dec_data, 'video file')
video_file = compat_urllib_parse_unquote(video_file)
if not video_file.endswith('f4m'):
ppath, prefix = video_file.split('.')
video_playpath = '%s:%s' % (prefix, ppath)
else:
video_playpath = ''
video_swfobj = self._search_regex('swfobject.embedSWF\(\'(.+?)\'', webpage, 'swfobj')
video_swfobj = compat_urllib_parse_unquote(video_swfobj)
video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
webpage, 'title')
return {
'id': video_id,
'url': video_url,
'tc_url': video_url,
'title': video_title,
'ext': 'flv',
'play_path': video_playpath,
'player_url': video_swfobj,
}
| 35.337079
| 111
| 0.534817
|
19ef09f81223f3148d0f8f0b8499e1f88648ab74
| 7,826
|
py
|
Python
|
now/utils/bytecode/interpreter.py
|
CrystalMei/Prov_Build
|
695576c36b7d5615f1cc568954658f8a7ce9eeba
|
[
"MIT"
] | 2
|
2017-11-10T16:17:11.000Z
|
2021-12-19T18:43:22.000Z
|
now/utils/bytecode/interpreter.py
|
CrystalMei/Prov_Build
|
695576c36b7d5615f1cc568954658f8a7ce9eeba
|
[
"MIT"
] | null | null | null |
now/utils/bytecode/interpreter.py
|
CrystalMei/Prov_Build
|
695576c36b7d5615f1cc568954658f8a7ce9eeba
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# Copyright (c) 2018, 2019, 2020 President and Fellows of Harvard College.
# This file is part of ProvBuild.
"""Define bytecode interpreter that supports iteration on bytecode"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import dis
from dis import opmap
from collections import defaultdict
from opcode import HAVE_ARGUMENT, cmp_op
from .instructions import Instruction
CALL_FUNCTIONS = {
opmap["CALL_FUNCTION"], opmap["CALL_FUNCTION_VAR"],
opmap["CALL_FUNCTION_KW"], opmap["CALL_FUNCTION_VAR_KW"]
}
PRINT_ITEMS = set()
if "PRINT_ITEM" in opmap:
PRINT_ITEMS.add(opmap["PRINT_ITEM"])
PRINT_ITEMS.add(opmap["PRINT_ITEM_TO"])
PRINT_NEW_LINES = set()
if "PRINT_NEWLINE" in opmap:
PRINT_NEW_LINES.add(opmap["PRINT_NEWLINE"])
PRINT_NEW_LINES.add(opmap["PRINT_NEWLINE_TO"])
SETUP_WITH = {opmap["SETUP_WITH"], }
WITH_CLEANUP = {opmap.get("WITH_CLEANUP") or opmap.get("WITH_CLEANUP_START"), }
SETUP_ASYNC_WITH = set()
if "SETUP_ASYNC_WITH" in opmap:
SETUP_ASYNC_WITH.add(opmap["SETUP_ASYNC_WITH"])
IMPORTS = {opmap["IMPORT_NAME"], opmap["IMPORT_FROM"]}
IMPORT_NAMES = {opmap["IMPORT_NAME"],}
FOR_ITERS = {opmap["FOR_ITER"],}
GET_ITERS = {opmap["GET_ITER"],}
def cord(value):
"""Convert (str or int) to ord"""
if isinstance(value, str):
return ord(value)
return value
class ListAccessor(object): # pylint: disable=too-few-public-methods
"""List Proxy. Return value on x[i] and tuple on x(i)"""
def __init__(self, values, repr_is_val=True):
self.values = values
self.repr_is_val = repr_is_val
def __getitem__(self, index):
if self.values is not None:
return self.values[index]
return index
def __call__(self, index):
argval = self[index]
if self.repr_is_val and self.values is not None:
argrepr = argval
else:
argrepr = repr(argval)
return argval, argrepr
class Interpreter(object): # pylint: disable=too-many-instance-attributes
"""Bytecode iterator"""
def __init__(self, co_code, varnames=None, names=None, constants=None, # pylint: disable=too-many-arguments
cells=None, linestarts=None, line_offset=0):
self.lasti = 0
self.opi = 0
self._extended_arg = 0
self._co_code = co_code
self.varnames = ListAccessor(varnames)
self.names = ListAccessor(names)
self.consts = ListAccessor(constants, repr_is_val=False)
self.cells = ListAccessor(cells)
self.linestarts = linestarts
self.line_offset = line_offset
self._size = len(co_code)
self.opcode = None
self.oparg = 0
self._stop = False
self._map = {}
self._extra = set()
self._missing = set()
self._supported = set()
if not hasattr(self, "_known_missing"):
self._known_missing = set()
self._create_map()
def __iter__(self):
"""Restart iterator"""
self._stop = False
return self
def __call__(self, lasti=0, extended_arg=0):
self.lasti = lasti
self._extended_arg = extended_arg
def next(self):
"""Python 2 iterator"""
if self._stop:
raise StopIteration
opcode = self._next_op()
self._map[opcode]()
return opcode
def __next__(self):
"""Python 3 iterator"""
return self.next()
def _next_op(self):
"""Get next operation"""
self._set_opcode()
if self.opcode >= HAVE_ARGUMENT:
self._have_argument()
if self.lasti >= self._size:
self._stop = True
return self.opcode
def _set_opcode(self):
"""Get op from code"""
self.oparg = None
self.opcode = cord(self._co_code[self.lasti])
self.opi = self.lasti
self.lasti += 1
def _have_argument(self):
"""Read argument if op has argument"""
cod = self._co_code
i = self.lasti
self.oparg = cord(cod[i]) + cord(cod[i + 1]) * 256 + self._extended_arg
self._extended_arg = 0
self.lasti += 2
def _create_map(self):
"""Create map of functions"""
condition = lambda x, obj: (
x[0] != "_" and hasattr(obj, "__call__") and
obj.__doc__ is not None and "opcode" in obj.__doc__)
to_opcode = lambda x: x.upper().replace("__", "+")
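        # Handler methods map to opcode names by upper-casing and turning a double
        # underscore into '+', e.g. a method named slice__1 would map to 'SLICE+1'.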
self._map = defaultdict(lambda: self.nop)
self._extra = set()
self._missing = set()
self._supported = set()
for name in dir(self):
method = getattr(self, name)
if condition(name, method):
opcode = to_opcode(name)
if opcode not in opmap:
self._extra.add(opcode)
else:
self._map[opmap[opcode]] = method
self._supported.add(opcode)
self._missing = (
set(opmap.keys()) - self._supported - self._known_missing)
@property
def extra_opcode(self):
"""Return opcode implemented by this class
but not supported by Python
"""
return self._extra
@property
def missing_opcode(self):
"""Return opcode supported by Python
but not implemented by this class"""
return self._missing
def nop(self):
"""NOP opcode"""
pass
class InstructionInterpreter(Interpreter):
"""Mix Python3 dis._get_instructions_bytes with Python2 dis.disassemble"""
def __init__(self, *args, **kwargs):
super(InstructionInterpreter, self).__init__(*args, **kwargs)
self._labels = dis.findlabels(self._co_code)
self.starts_line = None
self.is_jump_target = False
self.argval = None
self.argrepr = None
self.current_line = -1
def _set_opcode(self):
super(InstructionInterpreter, self)._set_opcode()
if self.linestarts is not None:
self.starts_line = self.linestarts.get(self.opi, None)
if self.starts_line is not None:
self.starts_line += self.line_offset
self.current_line = self.starts_line
self.is_jump_target = self.opi in self._labels
def _have_argument(self):
super(InstructionInterpreter, self)._have_argument()
opcode = self.opcode
arg = argval = self.oparg
argrepr = ""
if opcode in dis.hasconst:
argval, argrepr = self.consts(arg)
elif opcode in dis.hasname:
argval, argrepr = self.names(arg)
elif opcode in dis.hasjrel:
argval = self.lasti + arg
argrepr = "to " + repr(argval)
elif opcode in dis.haslocal:
argval, argrepr = self.varnames(arg)
elif opcode in dis.hascompare:
argval = cmp_op[arg]
argrepr = argval
elif opcode in dis.hasfree:
argval, argrepr = self.cells(arg)
elif opcode in CALL_FUNCTIONS:
argrepr = "%d positional, %d keyword pair" % (
cord(self._co_code[self.lasti - 2]),
cord(self._co_code[self.lasti - 1]))
self.argval, self.argrepr = argval, argrepr
def next(self):
super(InstructionInterpreter, self).next()
return Instruction(
            dis.opname[self.opcode], self.opcode, self.oparg, self.argval,
self.argrepr, self.opi, self.starts_line, self.is_jump_target,
self.current_line)
| 31.179283
| 127
| 0.601712
|
27d0bad42c5319ba47386fd6a2b906d7a62e3027
| 36,329
|
py
|
Python
|
aiida/backends/tests/restapi.py
|
tomzhang/aiida_core
|
949810e9f3daff0f748c5c9aa1dde4f5222bb49b
|
[
"BSD-2-Clause"
] | 1
|
2019-04-29T12:39:31.000Z
|
2019-04-29T12:39:31.000Z
|
aiida/backends/tests/restapi.py
|
tomzhang/aiida_core
|
949810e9f3daff0f748c5c9aa1dde4f5222bb49b
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/tests/restapi.py
|
tomzhang/aiida_core
|
949810e9f3daff0f748c5c9aa1dde4f5222bb49b
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import absolute_import
import json
import unittest
import six
from six.moves import zip
from aiida.backends.testbase import AiidaTestCase
from aiida.common.links import LinkType
from aiida.orm import DataFactory
from aiida.orm.calculation import Calculation
from aiida.orm.computer import Computer
from aiida.orm.data import Data
from aiida.orm.querybuilder import QueryBuilder
from aiida.restapi.api import App, AiidaApi
StructureData = DataFactory('structure')
CifData = DataFactory('cif')
ParameterData = DataFactory('parameter')
KpointsData = DataFactory('array.kpoints')
@unittest.skipIf(six.PY3, "Broken on Python 3")
class RESTApiTestCase(AiidaTestCase):
"""
Setup of the tests for the AiiDA RESTful-api
"""
_url_prefix = "/api/v2"
_dummy_data = {}
_PERPAGE_DEFAULT = 20
_LIMIT_DEFAULT = 400
@classmethod
def setUpClass(cls):
"""
        Besides the standard setup, we need to add a few more objects to the
        database to be able to test different requests/filters/orderings etc.
"""
# call parent setUpClass method
super(RESTApiTestCase, cls).setUpClass()
# connect the app and the api
        # Init the api by connecting it to the app (N.B. respect the following
# order, api.__init__)
kwargs = dict(PREFIX=cls._url_prefix,
PERPAGE_DEFAULT=cls._PERPAGE_DEFAULT,
LIMIT_DEFAULT=cls._LIMIT_DEFAULT)
cls.app = App(__name__)
cls.app.config['TESTING'] = True
api = AiidaApi(cls.app, **kwargs)
# create test inputs
cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
structure = StructureData(cell=cell)
structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
structure.store()
cif = CifData(ase=structure.get_ase())
cif.store()
parameter1 = ParameterData(dict={"a": 1, "b": 2})
parameter1.store()
parameter2 = ParameterData(dict={"c": 3, "d": 4})
parameter2.store()
kpoint = KpointsData()
kpoint.set_kpoints_mesh([4, 4, 4])
kpoint.store()
calc = Calculation()
calc._set_attr("attr1", "OK")
calc._set_attr("attr2", "OK")
calc.store()
calc.add_link_from(structure)
calc.add_link_from(parameter1)
kpoint.add_link_from(calc, link_type=LinkType.CREATE)
calc1 = Calculation()
calc1.store()
from aiida.orm.computer import Computer
dummy_computers = [
{
"name": "test1",
"hostname": "test1.epfl.ch",
"transport_type": "ssh",
"scheduler_type": "pbspro",
},
{
"name": "test2",
"hostname": "test2.epfl.ch",
"transport_type": "ssh",
"scheduler_type": "torque",
},
{
"name": "test3",
"hostname": "test3.epfl.ch",
"transport_type": "local",
"scheduler_type": "slurm",
},
{
"name": "test4",
"hostname": "test4.epfl.ch",
"transport_type": "ssh",
"scheduler_type": "slurm",
}
]
for dummy_computer in dummy_computers:
computer = cls.backend.computers.create(**dummy_computer)
computer.store()
# Prepare typical REST responses
cls.process_dummy_data()
def get_dummy_data(self):
return self._dummy_data
def get_url_prefix(self):
return self._url_prefix
@classmethod
def process_dummy_data(cls):
"""
        This function prepares atomic chunks of typical responses from the
        REST API and puts them into class attributes
"""
#TODO: Storing the different nodes as lists and accessing them
# by their list index is very fragile and a pain to debug.
# Please change this!
computer_projections = ["id", "uuid", "name", "hostname",
"transport_type", "scheduler_type"]
computers = QueryBuilder().append(
Computer, tag="comp", project=computer_projections).order_by(
{'comp': [{'name': {'order': 'asc'}}]}).dict()
# Cast UUID into a string (e.g. in sqlalchemy it comes as a UUID object)
computers = [_['comp'] for _ in computers]
for comp in computers:
if comp['uuid'] is not None:
comp['uuid'] = str(comp['uuid'])
cls._dummy_data["computers"] = computers
calculation_projections = ["id", "uuid", "user_id", "type"]
calculations = QueryBuilder().append(Calculation, tag="calc",
project=calculation_projections).order_by(
{'calc': [{'id': {'order': 'desc'}}]}).dict()
calculations = [_['calc'] for _ in calculations]
for calc in calculations:
if calc['uuid'] is not None:
calc['uuid'] = str(calc['uuid'])
cls._dummy_data["calculations"] = calculations
data_projections = ["id", "uuid", "user_id", "type"]
data_types = {
'cifdata': CifData,
'parameterdata': ParameterData,
'structuredata': StructureData,
'data': Data,
}
for label, dataclass in data_types.items():
data = QueryBuilder().append(dataclass, tag="data", project=data_projections).order_by(
{'data': [{'id': {'order': 'desc'}}]}).dict()
data = [_['data'] for _ in data]
for datum in data:
if datum['uuid'] is not None:
datum['uuid'] = str(datum['uuid'])
cls._dummy_data[label] = data
def split_path(self, url):
"""
Split the url with "?" to get url path and it's parameters
:param url: Web url
:return: url path and url parameters
"""
parts = url.split("?")
path = ""
query_string = ""
if len(parts) > 0:
path = parts[0]
if len(parts) > 1:
query_string = parts[1]
return path, query_string
def compare_extra_response_data(self, node_type, url, response, uuid=None):
"""
        In the url response, we pass some extra information/data along with the
        node results, e.g. url method, node_type, path, pk, query_string, url,
        url_root, etc.
        :param node_type: url requested for the type of the node
:param url: web url
:param response: url response
:param uuid: url requested for the node pk
"""
path, query_string = self.split_path(url)
self.assertEqual(response["method"], "GET")
self.assertEqual(response["resource_type"], node_type)
self.assertEqual(response["path"], path)
self.assertEqual(response["id"], uuid)
self.assertEqual(response["query_string"], query_string)
self.assertEqual(response["url"], "http://localhost" + url)
self.assertEqual(response["url_root"], "http://localhost/")
###### node details and list with limit, offset, page, perpage ####
def process_test(self, node_type, url, full_list=False, empty_list=False,
expected_list_ids=[], expected_range=[],
expected_errormsg=None, uuid=None, result_node_type=None,
result_name=None):
"""
Check whether response matches expected values.
        :param node_type: url requested for the type of the node
:param url: web url
:param full_list: if url is requested to get full list
:param empty_list: if the response list is empty
:param expected_list_ids: list of expected ids from data
:param expected_range: [start, stop] range of expected ids from data
:param expected_errormsg: expected error message in response
:param uuid: url requested for the node pk
:param result_node_type: node type in response data
:param result_name: result name in response e.g. inputs, outputs
"""
if result_node_type is None and result_name is None:
result_node_type = node_type
result_name = node_type
url = self._url_prefix + url
with self.app.test_client() as client:
rv = client.get(url)
response = json.loads(rv.data)
if expected_errormsg:
self.assertEqual(response["message"], expected_errormsg)
else:
if full_list:
expected_data = self._dummy_data[result_node_type]
elif empty_list:
expected_data = []
elif len(expected_list_ids) > 0:
expected_data = [self._dummy_data[result_node_type][i]
for i in expected_list_ids]
elif expected_range != []:
expected_data = self._dummy_data[result_node_type][
expected_range[0]:expected_range[1]]
else:
from aiida.common.exceptions import InputValidationError
raise InputValidationError(
"Pass the expected range of the dummydata")
self.assertEqual(
len(response["data"][result_name]), len(expected_data))
for expected_node, response_node in zip(expected_data,
response["data"][
result_name]):
self.assertEqual(response_node['uuid'], expected_node['uuid'])
self.compare_extra_response_data(node_type, url, response, uuid)
######## check exception #########
def node_exception(self, url, exception_type):
"""
Assert exception if any unknown parameter is passed in url
:param url: web url
:param exception_type: exception to be thrown
:return:
"""
self.assertRaises(exception_type, self.app.get(url))
class RESTApiTestSuite(RESTApiTestCase):
"""
"""
############### single computer ########################
def test_computers_details(self):
"""
        Requests the details of a single computer
"""
node_uuid = self.get_dummy_data()["computers"][0]["uuid"]
RESTApiTestCase.process_test(self, "computers",
"/computers/" + str(node_uuid),
expected_list_ids=[0], uuid=node_uuid)
############### full list with limit, offset, page, perpage #############
def test_computers_list(self):
"""
Get the full list of computers from database
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+id", full_list=True)
def test_computers_list_limit_offset(self):
"""
Get the list of computers from database using limit
and offset parameter.
        It should return the number of rows specified by limit,
        starting from the row number specified by offset
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?limit=2&offset=2&orderby=+id",
expected_range=[2, 4])
def test_computers_list_limit_only(self):
"""
Get the list of computers from database using limit
parameter.
        It should return the number of rows specified by limit.
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?limit=2&orderby=+id",
expected_range=[None, 2])
def test_computers_list_offset_only(self):
"""
Get the list of computers from database using offset
parameter
        It should return all the rows from the database starting from
        the row number specified by offset
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?offset=2&orderby=+id",
expected_range=[2, None])
def test_computers_list_limit_offset_perpage(self):
"""
        If we pass limit, offset and perpage at the same time, it
        returns an error message.
"""
expected_error = "perpage key is incompatible with limit and offset"
RESTApiTestCase.process_test(self, "computers",
"/computers?offset=2&limit=1&perpage=2&orderby=+id",
expected_errormsg=expected_error)
def test_computers_list_page_limit_offset(self):
"""
        If we use page, limit and offset at the same time, it
        returns an error message.
"""
expected_error = "requesting a specific page is incompatible with " \
"limit and offset"
RESTApiTestCase.process_test(self, "computers",
"/computers/page/2?offset=2&limit=1&orderby=+id",
expected_errormsg=expected_error)
def test_computers_list_page_limit_offset_perpage(self):
"""
        If we use page, limit, offset and perpage at the same time, it
        returns an error message.
"""
expected_error = "perpage key is incompatible with limit and offset"
RESTApiTestCase.process_test(self, "computers",
"/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id",
expected_errormsg=expected_error)
def test_computers_list_page_default(self):
"""
        It returns the number of rows defined by the default perpage option.
        no. of pages = total no. of computers in database / perpage
"/page" acts as "/page/1?perpage=default_value"
"""
RESTApiTestCase.process_test(self, "computers",
"/computers/page?orderby=+id",
full_list=True)
def test_computers_list_page_perpage(self):
"""
        no. of pages = total no. of computers in database / perpage
        Using this formula, it returns the rows for the requested page
"""
RESTApiTestCase.process_test(self, "computers",
"/computers/page/1?perpage=2&orderby=+id",
expected_range=[None, 2])
def test_computers_list_page_perpage_exceed(self):
"""
        no. of pages = total no. of computers in database / perpage
        If we request a page that exceeds the total number of pages,
        it returns an error message.
"""
expected_error = "Non existent page requested. The page range is [1 : " \
"3]"
RESTApiTestCase.process_test(self, "computers",
"/computers/page/4?perpage=2&orderby=+id",
expected_errormsg=expected_error)
############### list filters ########################
def test_computers_filter_id1(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()["computers"][0]["id"]
RESTApiTestCase.process_test(self, "computers",
"/computers?id=" + str(node_pk),
expected_list_ids=[0])
def test_computers_filter_id2(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id > 2)
"""
node_pk = self.get_dummy_data()["computers"][1]["id"]
RESTApiTestCase.process_test(self, "computers", "/computers?id>" + str(
node_pk) + "&orderby=+id",
expected_range=[2, None])
def test_computers_filter_pk(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()["computers"][0]["id"]
RESTApiTestCase.process_test(self, "computers",
"/computers?pk=" + str(node_pk),
expected_list_ids=[0])
def test_computers_filter_name(self):
"""
Add filter for the name of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, "computers",
'/computers?name="test1"',
expected_list_ids=[1])
def test_computers_filter_hostname(self):
"""
Add filter for the hostname of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, "computers",
'/computers?hostname="test1.epfl.ch"',
expected_list_ids=[1])
def test_computers_filter_transport_type(self):
"""
        Add filter for the transport_type of the computer and get the
        filtered computer list
"""
RESTApiTestCase.process_test(self, "computers",
'/computers?transport_type="local"&orderby=+id',
expected_list_ids=[0, 3])
############### list orderby ########################
def test_computers_orderby_id_asc(self):
"""
Returns the computers list ordered by "id" in ascending
order
"""
RESTApiTestCase.process_test(self, "computers", "/computers?orderby=id",
full_list=True)
def test_computers_orderby_id_asc_sign(self):
"""
Returns the computers list ordered by "+id" in ascending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+id",
full_list=True)
def test_computers_orderby_id_desc(self):
"""
Returns the computers list ordered by "id" in descending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=-id",
expected_list_ids=[4, 3, 2, 1, 0])
def test_computers_orderby_name_asc(self):
"""
Returns the computers list ordered by "name" in ascending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=name",
full_list=True)
def test_computers_orderby_name_asc_sign(self):
"""
Returns the computers list ordered by "+name" in ascending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+name",
full_list=True)
def test_computers_orderby_name_desc(self):
"""
Returns the computers list ordered by "name" in descending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=-name",
expected_list_ids=[4, 3, 2, 1, 0])
def test_computers_orderby_scheduler_type_asc(self):
"""
Returns the computers list ordered by "scheduler_type" in ascending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=scheduler_type",
expected_list_ids=[0, 1, 3, 4, 2])
def test_computers_orderby_scheduler_type_asc_sign(self):
"""
Returns the computers list ordered by "+scheduler_type" in ascending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+scheduler_type",
expected_list_ids=[0, 1, 3, 4, 2])
def test_computers_orderby_scheduler_type_desc(self):
"""
Returns the computers list ordered by "scheduler_type" in descending
order
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=-scheduler_type",
expected_list_ids=[2, 3, 4, 0, 1])
############### list orderby combinations #######################
def test_computers_orderby_mixed1(self):
"""
Returns the computers list first order by "transport_type" in
ascending order and if it is having same transport_type, order it
by "id"
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=transport_type,id",
expected_list_ids=[0, 3, 1, 2, 4])
def test_computers_orderby_mixed2(self):
"""
Returns the computers list first order by "scheduler_type" in
descending order and if it is having same scheduler_type, order it
by "name"
"""
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=-scheduler_type,name",
expected_list_ids=[2, 3, 4, 0, 1])
def test_computers_orderby_mixed3(self):
"""
Returns the computers list first order by "scheduler_type" in
ascending order and if it is having same scheduler_type, order it
by "hostname" descending order
Response::
test4 slurm
test3 slurm
test2 torque
test1 pbspro
localhost pbspro
==========
Expected::
test1 pbspro
localhost pbspro
test4 slurm
test3 slurm
test2 torque
test1 test4
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+scheduler_type,
-hostname",
expected_list_ids=[1,0,4,3,2])
"""
pass
############### list filter combinations #######################
def test_computers_filter_mixed1(self):
"""
Add filter for the hostname and id of computer and get the
filtered computer list
"""
node_pk = self.get_dummy_data()["computers"][0]["id"]
RESTApiTestCase.process_test(self, "computers",
'/computers?id>' + str(
node_pk) + '&hostname="test1.epfl.ch"',
expected_list_ids=[1])
def test_computers_filter_mixed2(self):
"""
Add filter for the id, hostname and transport_type of the computer
and get the filtered computer list
"""
node_pk = self.get_dummy_data()["computers"][0]["id"]
RESTApiTestCase.process_test(self, "computers",
'/computers?id>' + str(
node_pk) +
'&hostname="test3.epfl.ch"&transport_type="ssh"',
empty_list=True)
############### list all parameter combinations #######################
def test_computers_mixed1(self):
"""
url parameters: id, limit and offset
"""
node_pk = self.get_dummy_data()["computers"][0]["id"]
RESTApiTestCase.process_test(self, "computers",
"/computers?id>" + str(
node_pk) + "&limit=2&offset=3",
expected_list_ids=[4])
def test_computers_mixed2(self):
"""
url parameters: id, page, perpage
"""
node_pk = self.get_dummy_data()["computers"][0]["id"]
RESTApiTestCase.process_test(self, "computers",
"/computers/page/2?id>" + str(
node_pk) + "&perpage=2&orderby=+id",
expected_list_ids=[3, 4])
def test_computers_mixed3(self):
"""
url parameters: id, transport_type, orderby
"""
node_pk = self.get_dummy_data()["computers"][0]["id"]
RESTApiTestCase.process_test(self, "computers",
'/computers?id>=' + str(
node_pk) +
'&transport_type="ssh"&orderby=-id&limit=2',
expected_list_ids=[4, 2])
########## pass unknown url parameter ###########
def test_computers_unknown_param(self):
"""
url parameters: id, limit and offset
from aiida.common.exceptions import InputValidationError
RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
"""
pass
############### single calculation ########################
def test_calculations_details(self):
"""
        Requests the details of a single calculation
"""
node_uuid = self.get_dummy_data()["calculations"][0]["uuid"]
RESTApiTestCase.process_test(self, "calculations",
"/calculations/" + str(node_uuid),
expected_list_ids=[0], uuid=node_uuid)
############### full list with limit, offset, page, perpage #############
def test_calculations_list(self):
"""
Get the full list of calculations from database
"""
RESTApiTestCase.process_test(self, "calculations",
"/calculations?orderby=-id",
full_list=True)
def test_calculations_list_limit_offset(self):
"""
        Get the list of calculations from the database using the limit
        and offset parameters.
        It should return the number of rows specified by "limit",
        starting from the row number specified by "offset"
"""
RESTApiTestCase.process_test(self, "calculations",
"/calculations?limit=1&offset=1&orderby=+id",
expected_list_ids=[0])
############### calculation inputs #############
def test_calculation_inputs(self):
"""
        Get the list of inputs of a given calculation
"""
node_uuid = self.get_dummy_data()["calculations"][1]["uuid"]
self.process_test("calculations", "/calculations/" + str(
node_uuid) + "/io/inputs?orderby=id",
expected_list_ids=[4, 2], uuid=node_uuid,
result_node_type="data",
result_name="inputs")
def test_calculation_input_filters(self):
"""
        Get the filtered list of inputs for a given calculation
"""
node_uuid = self.get_dummy_data()["calculations"][1]["uuid"]
self.process_test("calculations", '/calculations/' + str(
node_uuid) + '/io/inputs?type="data.parameter.ParameterData."',
expected_list_ids=[2], uuid=node_uuid,
result_node_type="data",
result_name="inputs")
############### calculation attributes #############
def test_calculation_attributes(self):
"""
Get list of calculation attributes
"""
node_uuid = self.get_dummy_data()["calculations"][1]["uuid"]
url = self.get_url_prefix() + "/calculations/" + str(
node_uuid) + "/content/attributes"
with self.app.test_client() as client:
rv = client.get(url)
response = json.loads(rv.data)
self.assertEqual(response["data"]["attributes"],
{'attr2': 'OK', 'attr1': 'OK'})
RESTApiTestCase.compare_extra_response_data(self, "calculations",
url,
response, uuid=node_uuid)
def test_calculation_attributes_nalist_filter(self):
"""
Get list of calculation attributes with filter nalist
"""
node_uuid = self.get_dummy_data()["calculations"][1]["uuid"]
url = self.get_url_prefix() + '/calculations/' + str(
node_uuid) + '/content/attributes?nalist="attr1"'
with self.app.test_client() as client:
rv = client.get(url)
response = json.loads(rv.data)
self.assertEqual(response["data"]["attributes"], {'attr2': 'OK'})
RESTApiTestCase.compare_extra_response_data(self, "calculations",
url,
response, uuid=node_uuid)
def test_calculation_attributes_alist_filter(self):
"""
Get list of calculation attributes with filter alist
"""
node_uuid = self.get_dummy_data()["calculations"][1]["uuid"]
url = self.get_url_prefix() + '/calculations/' + str(
node_uuid) + '/content/attributes?alist="attr1"'
with self.app.test_client() as client:
rv = client.get(url)
response = json.loads(rv.data)
self.assertEqual(response["data"]["attributes"], {'attr1': 'OK'})
RESTApiTestCase.compare_extra_response_data(self, "calculations",
url,
response, uuid=node_uuid)
############### Structure visualization and download #############
def test_structure_visualization(self):
"""
        Get the visualization data (CIF format) of a given structure
"""
from aiida.backends.tests.dataclasses import simplify
node_uuid = self.get_dummy_data()["structuredata"][0]["uuid"]
url = self.get_url_prefix() + '/structures/' + str(
node_uuid) + '/content/visualization?visformat=cif'
with self.app.test_client() as client:
rv = client.get(url)
response = json.loads(rv.data)
expected_visdata = """\n##########################################################################\n# Crystallographic Information Format file \n# Produced by PyCifRW module\n# \n# This is a CIF file. CIF has been adopted by the International\n# Union of Crystallography as the standard for data archiving and \n# transmission.\n#\n# For information on this file format, follow the CIF links at\n# http://www.iucr.org\n##########################################################################\n\ndata_0\nloop_\n _atom_site_label\n _atom_site_fract_x\n _atom_site_fract_y\n _atom_site_fract_z\n _atom_site_type_symbol\n Ba1 0.0 0.0 0.0 Ba\n \n_cell_angle_alpha 90.0\n_cell_angle_beta 90.0\n_cell_angle_gamma 90.0\n_cell_length_a 2.0\n_cell_length_b 2.0\n_cell_length_c 2.0\nloop_\n _symmetry_equiv_pos_as_xyz\n 'x, y, z'\n \n_symmetry_int_tables_number 1\n_symmetry_space_group_name_H-M 'P 1'\n"""
self.assertEquals(simplify(response["data"]["visualization"]["str_viz_info"]["data"]),simplify(expected_visdata))
self.assertEquals(response["data"]["visualization"]["str_viz_info"]["format"],"cif")
self.assertEquals(response["data"]["visualization"]["dimensionality"],
{u'dim': 3, u'value': 8.0, u'label': u'volume'})
self.assertEquals(response["data"]["visualization"]["pbc"], [True,True,True])
self.assertEquals(response["data"]["visualization"]["formula"], "Ba")
RESTApiTestCase.compare_extra_response_data(self, "structures",
url,
response, uuid=node_uuid)
def test_xsf_visualization(self):
"""
        Get the visualization data (XSF format) of a given structure
"""
from aiida.backends.tests.dataclasses import simplify
node_uuid = self.get_dummy_data()["structuredata"][0]["uuid"]
url = self.get_url_prefix() + '/structures/' + str(
node_uuid) + '/content/visualization?visformat=xsf'
with self.app.test_client() as client:
rv = client.get(url)
response = json.loads(rv.data)
expected_visdata = "CRYSTAL\nPRIMVEC 1\n 2.0000000000 0.0000000000 0.0000000000\n 0.0000000000 2.0000000000 0.0000000000\n 0.0000000000 0.0000000000 2.0000000000\nPRIMCOORD 1\n1 1\n56 0.0000000000 0.0000000000 0.0000000000\n"
self.assertEquals(simplify(response["data"]["visualization"]["str_viz_info"]["data"]),simplify(expected_visdata))
self.assertEquals(response["data"]["visualization"]["str_viz_info"]["format"],"xsf")
self.assertEquals(response["data"]["visualization"]["dimensionality"],
{u'dim': 3, u'value': 8.0, u'label': u'volume'})
self.assertEquals(response["data"]["visualization"]["pbc"], [True,True,True])
self.assertEquals(response["data"]["visualization"]["formula"], "Ba")
RESTApiTestCase.compare_extra_response_data(self, "structures",
url,
response, uuid=node_uuid)
def test_cif(self):
"""
Test download of cif file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()["cifdata"][0]["uuid"]
url = self.get_url_prefix() + '/cifs/' + node_uuid + '/content/download'
with self.app.test_client() as client:
rv = client.get(url)
cif = load_node(node_uuid)._prepare_cif()[0]
self.assertEquals(rv.data, cif )
############### schema #############
def test_schema(self):
"""
test schema
"""
for nodetype in ["nodes", "calculations", "data", "codes", "computers", "users", "groups"]:
url = self.get_url_prefix() + '/' + nodetype + '/schema'
with self.app.test_client() as client:
rv = client.get(url)
response = json.loads(rv.data)
expected_keys = ["display_name", "help_text", "is_display", "is_foreign_key", "type"]
# check fields
for pkey, pinfo in response["data"]["fields"].items():
available_keys = pinfo.keys()
for prop in expected_keys:
self.assertIn(prop, available_keys)
# check order
available_properties = response["data"]["fields"].keys()
for prop in response["data"]["ordering"]:
self.assertIn(prop, available_properties)
| 42.490058
| 1,120
| 0.535027
|
45be6a79f8aeb3147ee92fc1b7dde3b9dd21bc32
| 7,075
|
py
|
Python
|
tests/test_formats.py
|
bzinberg/jupytext
|
219dc18740c0277aaf49ef65e2b7a7ac507d5085
|
[
"MIT"
] | 1
|
2020-09-10T13:16:03.000Z
|
2020-09-10T13:16:03.000Z
|
tests/test_formats.py
|
urbach/jupytext
|
6d3a38505ae539975085f9d5b4e457c9566a7977
|
[
"MIT"
] | null | null | null |
tests/test_formats.py
|
urbach/jupytext
|
6d3a38505ae539975085f9d5b4e457c9566a7977
|
[
"MIT"
] | null | null | null |
import pytest
from testfixtures import compare
from nbformat.v4.nbbase import new_notebook
import jupytext
from jupytext.formats import guess_format, divine_format, read_format_from_metadata, rearrange_jupytext_metadata
from jupytext.formats import long_form_multiple_formats, short_form_multiple_formats, update_jupytext_formats_metadata
from jupytext.formats import get_format_implementation, validate_one_format, JupytextFormatError
from .utils import list_notebooks
@pytest.mark.parametrize('nb_file', list_notebooks('python'))
def test_guess_format_light(nb_file):
with open(nb_file) as stream:
assert guess_format(stream.read(), ext='.py') == 'light'
@pytest.mark.parametrize('nb_file', list_notebooks('percent'))
def test_guess_format_percent(nb_file):
with open(nb_file) as stream:
assert guess_format(stream.read(), ext='.py') == 'percent'
@pytest.mark.parametrize('nb_file', list_notebooks('sphinx'))
def test_guess_format_sphinx(nb_file):
with open(nb_file) as stream:
assert guess_format(stream.read(), ext='.py') == 'sphinx'
def test_divine_format():
assert divine_format('{"cells":[]}') == 'ipynb'
assert divine_format('''def f(x):
x + 1''') == 'py:light'
assert divine_format('''# %%
def f(x):
x + 1
# %%
def g(x):
x + 2
''') == 'py:percent'
assert divine_format('''This is a markdown file
with one code block
```
1 + 1
```
''') == 'md'
assert divine_format(''';; ---
;; jupyter:
;; jupytext:
;; text_representation:
;; extension: .ss
;; format_name: percent
;; ---''') == 'ss:percent'
def test_get_format_implementation():
assert get_format_implementation('.py').format_name == 'light'
assert get_format_implementation('.py', 'percent').format_name == 'percent'
with pytest.raises(JupytextFormatError):
get_format_implementation('.py', 'wrong_format')
def test_script_with_magics_not_percent(script="""# %%time
1 + 2"""):
assert guess_format(script, '.py') == 'light'
def test_script_with_spyder_cell_is_percent(script="""#%%
1 + 2"""):
assert guess_format(script, '.py') == 'percent'
def test_script_with_percent_cell_and_magic_is_hydrogen(script="""#%%
%matplotlib inline
"""):
assert guess_format(script, '.py') == 'hydrogen'
def test_script_with_spyder_cell_with_name_is_percent(script="""#%% cell name
1 + 2"""):
assert guess_format(script, '.py') == 'percent'
def test_read_format_from_metadata(script="""---
jupyter:
jupytext:
formats: ipynb,pct.py:percent,lgt.py:light,spx.py:sphinx,md,Rmd
text_representation:
extension: .pct.py
format_name: percent
format_version: '1.1'
jupytext_version: 0.8.0
---"""):
assert read_format_from_metadata(script, '.Rmd') is None
def test_update_jupytext_formats_metadata():
nb = new_notebook(metadata={'jupytext': {'formats': 'py'}})
update_jupytext_formats_metadata(nb.metadata, 'py:light')
assert nb.metadata['jupytext']['formats'] == 'py:light'
nb = new_notebook(metadata={'jupytext': {'formats': 'ipynb,py'}})
update_jupytext_formats_metadata(nb.metadata, 'py:light')
assert nb.metadata['jupytext']['formats'] == 'ipynb,py:light'
def test_decompress_formats():
assert long_form_multiple_formats('ipynb') == [{'extension': '.ipynb'}]
assert long_form_multiple_formats('ipynb,md') == [{'extension': '.ipynb'}, {'extension': '.md'}]
assert long_form_multiple_formats('ipynb,py:light') == [{'extension': '.ipynb'},
{'extension': '.py', 'format_name': 'light'}]
assert long_form_multiple_formats(['ipynb', '.py:light']) == [{'extension': '.ipynb'},
{'extension': '.py', 'format_name': 'light'}]
assert long_form_multiple_formats('.pct.py:percent') == [
{'extension': '.py', 'suffix': '.pct', 'format_name': 'percent'}]
def test_compress_formats():
assert short_form_multiple_formats([{'extension': '.ipynb'}]) == 'ipynb'
assert short_form_multiple_formats([{'extension': '.ipynb'}, {'extension': '.md'}]) == 'ipynb,md'
assert short_form_multiple_formats(
[{'extension': '.ipynb'}, {'extension': '.py', 'format_name': 'light'}]) == 'ipynb,py:light'
assert short_form_multiple_formats([{'extension': '.ipynb'},
{'extension': '.py', 'format_name': 'light'},
{'extension': '.md', 'comment_magics': True}]) == 'ipynb,py:light,md'
assert short_form_multiple_formats(
[{'extension': '.py', 'suffix': '.pct', 'format_name': 'percent'}]) == '.pct.py:percent'
def test_rearrange_jupytext_metadata():
metadata = {'nbrmd_formats': 'ipynb,py'}
rearrange_jupytext_metadata(metadata)
compare({'jupytext': {'formats': 'ipynb,py'}}, metadata)
metadata = {'jupytext_formats': 'ipynb,py'}
rearrange_jupytext_metadata(metadata)
compare({'jupytext': {'formats': 'ipynb,py'}}, metadata)
metadata = {'executable': '#!/bin/bash'}
rearrange_jupytext_metadata(metadata)
compare({'jupytext': {'executable': '#!/bin/bash'}}, metadata)
def test_rearrange_jupytext_metadata_metadata_filter():
metadata = {'jupytext': {'metadata_filter': {'notebook': {'additional': ['one', 'two'], 'excluded': 'all'},
'cells': {'additional': 'all', 'excluded': ['three', 'four']}}}}
rearrange_jupytext_metadata(metadata)
compare({'jupytext': {'notebook_metadata_filter': 'one,two,-all',
'cell_metadata_filter': 'all,-three,-four'}}, metadata)
def test_rearrange_jupytext_metadata_add_dot_in_suffix():
metadata = {'jupytext': {'text_representation': {'jupytext_version': '0.8.6'},
'formats': 'ipynb,pct.py,lgt.py'}}
rearrange_jupytext_metadata(metadata)
compare({'jupytext': {'text_representation': {'jupytext_version': '0.8.6'},
'formats': 'ipynb,.pct.py,.lgt.py'}}, metadata)
def test_fix_139():
text = """# ---
# jupyter:
# jupytext:
# metadata_filter:
# cells:
# additional:
# - "lines_to_next_cell"
# excluded:
# - "all"
# ---
# + {"lines_to_next_cell": 2}
1 + 1
# -
1 + 1
"""
nb = jupytext.reads(text, 'py:light')
text2 = jupytext.writes(nb, 'py:light')
assert 'cell_metadata_filter: -all' in text2
assert 'lines_to_next_cell' not in text2
def test_validate_one_format():
with pytest.raises(JupytextFormatError):
validate_one_format('py:percent')
with pytest.raises(JupytextFormatError):
validate_one_format({})
with pytest.raises(JupytextFormatError):
validate_one_format({'extension': '.py', 'unknown_option': True})
with pytest.raises(JupytextFormatError):
validate_one_format({'extension': '.py', 'comment_magics': 'TRUE'})
def test_set_auto_ext():
with pytest.raises(ValueError):
long_form_multiple_formats('ipynb,auto:percent', {})
| 34.681373
| 118
| 0.646502
|
d9161d5c5935873264138e0df1a6aa7b2631ce3e
| 195
|
py
|
Python
|
jobs/views.py
|
safuente/santiago-alvarez-cv
|
01193bb32e14c2f54483f04e669d113b50bcb731
|
[
"MIT"
] | null | null | null |
jobs/views.py
|
safuente/santiago-alvarez-cv
|
01193bb32e14c2f54483f04e669d113b50bcb731
|
[
"MIT"
] | null | null | null |
jobs/views.py
|
safuente/santiago-alvarez-cv
|
01193bb32e14c2f54483f04e669d113b50bcb731
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .models import Job
def home(request):
jobs = Job.objects.all()
context = {'jobs': jobs}
return render(request, 'jobs/home.html', context)
| 17.727273
| 53
| 0.687179
|
f6d65c62d3731ff69a00c46dbd147b42c7e01eb2
| 4,547
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/layout/xaxis/rangeslider/_yaxis.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/layout/xaxis/rangeslider/_yaxis.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/layout/xaxis/rangeslider/_yaxis.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class YAxis(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.xaxis.rangeslider"
_path_str = "layout.xaxis.rangeslider.yaxis"
_valid_props = {"range", "rangemode"}
# range
# -----
@property
def range(self):
"""
Sets the range of this axis for the rangeslider.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
# rangemode
# ---------
@property
def rangemode(self):
"""
Determines whether or not the range of this axis in the
rangeslider use the same value than in the main plot when
zooming in/out. If "auto", the autorange will be used. If
"fixed", the `range` is used. If "match", the current range of
the corresponding y-axis on the main subplot is used.
The 'rangemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'fixed', 'match']
Returns
-------
Any
"""
return self["rangemode"]
@rangemode.setter
def rangemode(self, val):
self["rangemode"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
range
Sets the range of this axis for the rangeslider.
rangemode
Determines whether or not the range of this axis in the
rangeslider use the same value than in the main plot
when zooming in/out. If "auto", the autorange will be
used. If "fixed", the `range` is used. If "match", the
current range of the corresponding y-axis on the main
subplot is used.
"""
def __init__(self, arg=None, range=None, rangemode=None, **kwargs):
"""
Construct a new YAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.xaxis.r
angeslider.YAxis`
range
Sets the range of this axis for the rangeslider.
rangemode
Determines whether or not the range of this axis in the
rangeslider use the same value than in the main plot
when zooming in/out. If "auto", the autorange will be
used. If "fixed", the `range` is used. If "match", the
current range of the corresponding y-axis on the main
subplot is used.
Returns
-------
YAxis
"""
super(YAxis, self).__init__("yaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.xaxis.rangeslider.YAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.rangeslider.YAxis`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("range", None)
_v = range if range is not None else _v
if _v is not None:
self["range"] = _v
_v = arg.pop("rangemode", None)
_v = rangemode if rangemode is not None else _v
if _v is not None:
self["rangemode"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
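# Editor's illustrative sketch (not generated code): how the two properties
# documented above are typically set on a figure's x-axis rangeslider. The
# sample data and the [0, 10] range are assumptions made for the example.
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(go.Scatter(x=[1, 2, 3], y=[2, 5, 3]))
    fig.update_layout(
        xaxis=dict(
            rangeslider=dict(
                visible=True,
                # "fixed" pins the rangeslider's y extent to `range` instead
                # of autoranging with the data.
                yaxis=dict(rangemode="fixed", range=[0, 10]),
            )
        )
    )
    fig.show()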
| 31.143836
| 84
| 0.550913
|
263450e786ad4556ed719387576f213c6877a59e
| 3,391
|
py
|
Python
|
papers/4318-2020-Stonehouse/Examples/CreditRisk.py
|
andybouts/sas-global-forum-2020
|
b73f8672ec5395e2afa21b8a738802f7f25e09e8
|
[
"Apache-2.0"
] | 30
|
2020-01-17T19:46:09.000Z
|
2022-03-16T08:01:59.000Z
|
papers/4318-2020-Stonehouse/Examples/CreditRisk.py
|
andybouts/sas-global-forum-2020
|
b73f8672ec5395e2afa21b8a738802f7f25e09e8
|
[
"Apache-2.0"
] | 6
|
2020-02-11T17:04:40.000Z
|
2020-11-03T17:04:37.000Z
|
papers/4318-2020-Stonehouse/Examples/CreditRisk.py
|
andybouts/sas-global-forum-2020
|
b73f8672ec5395e2afa21b8a738802f7f25e09e8
|
[
"Apache-2.0"
] | 70
|
2020-01-16T15:06:56.000Z
|
2022-03-22T21:54:37.000Z
|
#%% Import packages
import swat
import pandas as pd
import os
from sys import platform
import riskpy
from os.path import join as path
#%% 1a. Start the CAS session
if "CASHOST" in os.environ:
# Create a session to the CASHOST and CASPORT variables set in your environment
conn = riskpy.SessionContext(session=swat.CAS(),
caslib="CASUSER")
else:
# Otherwise set this to your host and port:
host = "riskpy.rqs-cloud.sashq-d.openstack.sas.com"
port = 5570
conn = riskpy.SessionContext(session=swat.CAS(host, port), caslib="CASUSER")
#%% 1b. Setup input/output paths - change as needed for your running environment:
base_dir = '.'
# Set output location
if platform == "win32":
# Windows...
output_dir = 'u:\\temp'
else:
# platform == "linux" or platform == "linux2" or platform == "darwin":
output_dir = '/tmp'
#%% 3. Create object: scenarios
mkt_data = riskpy.MarketData(
current = pd.DataFrame(data={'uerate': 6.0}, index=[0]),
risk_factors = ['uerate'])
#%% Create scenarios
my_scens = riskpy.Scenarios(
name = "my_scens",
market_data = mkt_data,
data = path("datasources","CreditRisk",'uerate_scenario.xlsx'))
#%% 4. Create object: Counterparties
cpty_df = pd.read_excel(path("datasources","CreditRisk",'uerate_cpty.xlsx'))
loan_groups = riskpy.Counterparties(data=pd.read_excel(
path("datasources","CreditRisk",'uerate_cpty.xlsx')))
loan_groups.mapping = {"cpty1": "score_uerate"}
#%% 5. Create object scoring methods
score_code_file=(path("methods","CreditRisk",'score_uerate.sas'))
scoring_methods = riskpy.MethodLib(
method_code=path("methods","CreditRisk",'score_uerate.sas'))
#%% 6.Generate scores (Scores object)
my_scores = riskpy.Scores(counterparties=loan_groups,
scenarios=my_scens,
method_lib=scoring_methods)
my_scores.generate(session_context=conn, write_allscore=True)
print(my_scores.allscore.head())
allscore_file = path(output_dir, 'simple_allscores.xlsx')
my_scores.allscore.to_excel(allscore_file)
#%% 7. Create object: Portfolio
portfolio = riskpy.Portfolio(
data=path("datasources","CreditRisk",'retail_portfolio.xlsx'),
class_variables = ["region", "cptyid"])
#%% 8. Create object: Evaluation methods
eval_methods = riskpy.MethodLib(
method_code=path("methods","CreditRisk",'credit_method2.sas'))
#%% 9. Run analysis (Values object)
my_values = riskpy.Values(
session_context=conn,
portfolio=portfolio,
output_variables=["Expected_Credit_Loss"],
scenarios=my_scens,
scores=my_scores,
method_lib=eval_methods,
mapping = {"Retail": "ecl_method"})
my_values.evaluate(write_prices=True)
allprice_df = my_values.fetch_prices(max_rows=100000)
print(my_values.allprice.head())
allprice_file = path(output_dir, 'creditrisk_allprice.xlsx')
allprice_df.to_excel(allprice_file)
#%% 10. Get results
results = riskpy.Results(
session_context=conn,
values=my_values,
requests=["_TOP_", ["region"]],
out_type="values"
)
results_df = results.query().to_frame()
print(results_df.head())
rollup_file = path(output_dir, 'creditrisk_rollup_by_region.xlsx')
results_df.to_excel(rollup_file)
| 33.91
| 83
| 0.676792
|
10d9e64eb4f4dbc76b806256db1ed7c7ce45e4d9
| 1,710
|
py
|
Python
|
forklog.py
|
z00k0/scrape
|
ddf49c3ce6a165bfe8099db187e605ac2a6b5754
|
[
"MIT"
] | null | null | null |
forklog.py
|
z00k0/scrape
|
ddf49c3ce6a165bfe8099db187e605ac2a6b5754
|
[
"MIT"
] | null | null | null |
forklog.py
|
z00k0/scrape
|
ddf49c3ce6a165bfe8099db187e605ac2a6b5754
|
[
"MIT"
] | null | null | null |
import requests
import codecs
import json
import re
import logging
from bs4 import BeautifulSoup
logging.basicConfig(
level=logging.DEBUG,
filename='forklog.log',
format='%(asctime)s %(levelname)s:%(message)s'
)
session = requests.Session()
url = "https://forklog.com/news/"
resp = session.get(url)
soup = BeautifulSoup(resp.content, 'lxml')
with codecs.open('news.html', 'w', 'utf-8') as file: # save news.html for debugging
file.write(resp.text)
news_dict = {}
counter = 0
for item in soup.findAll('div', 'post_item'):
news_link = item.find('a')['href']
news_date = item.find('span', 'post_date').text
if 14 < int(news_date.split('.')[0]) < 27:
news_dict[counter] = (news_date, news_link)
counter += 1
with open('dict.json', 'w', encoding='utf-8') as file: # save news links as json for debugging
json.dump(news_dict, file, ensure_ascii=False, indent=4)
code_list = {}
for key, value in news_dict.items():
news_resp = session.get(value[1])
news_soup = BeautifulSoup(news_resp.content, 'lxml')
news = news_soup.find('div', 'post_content')
try:
code = news.find('strong').text
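        # The article body is assumed to contain a marker of the form
        # "[<number> <em dash> <word>]"; group(1) is the number and group(2)
        # the word.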
m = re.search('\[?(\d+)\s—\s(\w+)\]?', code)
number = int(m.group(1))
word = m.group(2)
code_list[number] = word
except Exception as ex:
print(ex)
number = 0
with open('words.json', 'r', encoding='utf-8') as file:
words = json.load(file)
for key, value in code_list.items():
    # JSON object keys are always strings, so normalise the key before
    # checking for and inserting new entries.
    if str(key) not in words:
        words[str(key)] = value
with open('words.json', 'w', encoding='utf-8') as file:
json.dump(words, file, ensure_ascii=False, indent=4)
| 28.032787
| 96
| 0.615205
|
1f1ba2ca6c4e40ffea83e4b88fd154cc076a042c
| 7,246
|
py
|
Python
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_partition_key_range_id_region_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_partition_key_range_id_region_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | null | null | null |
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_partition_key_range_id_region_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 1
|
2021-12-18T20:01:22.000Z
|
2021-12-18T20:01:22.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PartitionKeyRangeIdRegionOperations:
"""PartitionKeyRangeIdRegionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_metrics(
self,
resource_group_name: str,
account_name: str,
region: str,
database_rid: str,
collection_rid: str,
partition_key_range_id: str,
filter: str,
**kwargs
) -> AsyncIterable["_models.PartitionMetricListResult"]:
"""Retrieves the metrics determined by the given filter for the given partition key range id and
region.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param region: Cosmos DB region, with spaces between words and each word capitalized.
:type region: str
:param database_rid: Cosmos DB database rid.
:type database_rid: str
:param collection_rid: Cosmos DB collection rid.
:type collection_rid: str
:param partition_key_range_id: Partition Key Range Id for which to get data.
:type partition_key_range_id: str
:param filter: An OData filter expression that describes a subset of metrics to return. The
parameters that can be filtered are name.value (name of the metric, can have an or of multiple
names), startTime, endTime, and timeGrain. The supported operator is eq.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PartitionMetricListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.PartitionMetricListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PartitionMetricListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_metrics.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'region': self._serialize.url("region", region, 'str'),
'databaseRid': self._serialize.url("database_rid", database_rid, 'str'),
'collectionRid': self._serialize.url("collection_rid", collection_rid, 'str'),
'partitionKeyRangeId': self._serialize.url("partition_key_range_id", partition_key_range_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PartitionMetricListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/region/{region}/databases/{databaseRid}/collections/{collectionRid}/partitionKeyRangeId/{partitionKeyRangeId}/metrics'} # type: ignore
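# Editor's illustrative sketch (not generated code): issuing the metrics query
# described above through the async management client. The subscription id,
# resource names and the filter string are placeholder assumptions; only the
# call shape mirrors list_metrics() as defined in this module.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient
    async def _example():
        credential = DefaultAzureCredential()
        async with CosmosDBManagementClient(credential, "<subscription-id>") as client:
            pager = client.partition_key_range_id_region.list_metrics(
                resource_group_name="<resource-group>",
                account_name="<cosmos-account>",
                region="West US",
                database_rid="<database-rid>",
                collection_rid="<collection-rid>",
                partition_key_range_id="0",
                filter="(name.value eq 'Max RUs Per Second') and timeGrain eq duration'PT1M'",
            )
            # list_metrics returns an AsyncItemPaged; iterate it lazily.
            async for metric in pager:
                print(metric)
        await credential.close()
    asyncio.run(_example())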
| 51.757143
| 301
| 0.664781
|
d14992d32d65ff2f475effd22bdbff3381006d35
| 5,684
|
py
|
Python
|
src/openfermion/hamiltonians/_chemical_series.py
|
josh146/OpenFermion
|
e64c17a8043b2ad0a60695b581293bf903ad91a3
|
[
"Apache-2.0"
] | 3
|
2018-08-03T22:48:47.000Z
|
2022-02-10T15:05:35.000Z
|
src/openfermion/hamiltonians/_chemical_series.py
|
josh146/OpenFermion
|
e64c17a8043b2ad0a60695b581293bf903ad91a3
|
[
"Apache-2.0"
] | null | null | null |
src/openfermion/hamiltonians/_chemical_series.py
|
josh146/OpenFermion
|
e64c17a8043b2ad0a60695b581293bf903ad91a3
|
[
"Apache-2.0"
] | 1
|
2019-09-02T00:50:58.000Z
|
2019-09-02T00:50:58.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create chemical series data sets."""
from __future__ import absolute_import
import numpy
from openfermion.hamiltonians._molecular_data import (MolecularData,
periodic_hash_table,
periodic_polarization)
# Define error objects which inherit from Exception.
class MolecularLatticeError(Exception):
pass
def make_atomic_ring(n_atoms, spacing, basis,
atom_type='H', charge=0, filename=''):
"""Function to create atomic rings with n_atoms.
Note that basic geometry suggests that for spacing L between atoms
the radius of the ring should be L / (2 * cos (pi / 2 - theta / 2))
Args:
n_atoms: Integer, the number of atoms in the ring.
spacing: The spacing between atoms in the ring in Angstroms.
basis: The basis in which to perform the calculation.
atom_type: String, the atomic symbol of the element in the ring.
this defaults to 'H' for Hydrogen.
charge: An integer giving the total molecular charge. Defaults to 0.
filename: An optional string to give a filename for the molecule.
Returns:
        molecule: An instance of the MolecularData class.
"""
# Make geometry.
geometry = []
theta = 2. * numpy.pi / float(n_atoms)
radius = spacing / (2. * numpy.cos(numpy.pi / 2. - theta / 2.))
for atom in range(n_atoms):
x_coord = radius * numpy.cos(atom * theta)
y_coord = radius * numpy.sin(atom * theta)
geometry += [(atom_type, (x_coord, y_coord, 0.))]
# Set multiplicity.
n_electrons = n_atoms * periodic_hash_table[atom_type]
n_electrons -= charge
if (n_electrons % 2):
multiplicity = 2
else:
multiplicity = 1
# Create molecule and return.
description = 'ring_{}'.format(spacing)
molecule = MolecularData(geometry,
basis,
multiplicity,
charge,
description,
filename)
return molecule
def make_atomic_lattice(nx_atoms, ny_atoms, nz_atoms, spacing, basis,
atom_type='H', charge=0, filename=''):
"""Function to create atomic lattice with n_atoms.
Args:
nx_atoms: Integer, the length of lattice (in number of atoms).
ny_atoms: Integer, the width of lattice (in number of atoms).
nz_atoms: Integer, the depth of lattice (in number of atoms).
spacing: The spacing between atoms in the lattice in Angstroms.
basis: The basis in which to perform the calculation.
atom_type: String, the atomic symbol of the element in the ring.
this defaults to 'H' for Hydrogen.
charge: An integer giving the total molecular charge. Defaults to 0.
filename: An optional string to give a filename for the molecule.
Returns:
        molecule: An instance of the MolecularData class.
Raises:
MolecularLatticeError: If lattice specification is invalid.
"""
# Make geometry.
geometry = []
for x_dimension in range(nx_atoms):
for y_dimension in range(ny_atoms):
for z_dimension in range(nz_atoms):
x_coord = spacing * x_dimension
y_coord = spacing * y_dimension
z_coord = spacing * z_dimension
geometry += [(atom_type, (x_coord, y_coord, z_coord))]
# Set multiplicity.
n_atoms = nx_atoms * ny_atoms * nz_atoms
n_electrons = n_atoms * periodic_hash_table[atom_type]
n_electrons -= charge
if (n_electrons % 2):
multiplicity = 2
else:
multiplicity = 1
# Name molecule.
dimensions = bool(nx_atoms > 1) + bool(ny_atoms > 1) + bool(nz_atoms > 1)
if dimensions == 1:
description = 'linear_{}'.format(spacing)
elif dimensions == 2:
description = 'planar_{}'.format(spacing)
elif dimensions == 3:
description = 'cubic_{}'.format(spacing)
else:
raise MolecularLatticeError('Invalid lattice dimensions.')
# Create molecule and return.
molecule = MolecularData(geometry,
basis,
multiplicity,
charge,
description,
filename)
return molecule
def make_atom(atom_type, basis, filename=''):
"""Prepare a molecular data instance for a single element.
Args:
        atom_type: String giving the atomic symbol.
basis: The basis in which to perform the calculation.
Returns:
atom: An instance of the MolecularData class.
"""
geometry = [(atom_type, (0., 0., 0.))]
atomic_number = periodic_hash_table[atom_type]
spin = periodic_polarization[atomic_number] / 2.
multiplicity = int(2 * spin + 1)
atom = MolecularData(geometry,
basis,
multiplicity,
filename=filename)
return atom
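# Editor's illustrative sketch (not part of the library): exercising the ring
# helper defined above. The 0.74 Angstrom spacing and the sto-3g basis are
# example choices, not recommendations; the radius printed below follows the
# L / (2 * cos(pi / 2 - theta / 2)) relation quoted in make_atomic_ring's
# docstring.
if __name__ == '__main__':
    example_ring = make_atomic_ring(6, 0.74, 'sto-3g')
    example_theta = 2. * numpy.pi / 6.
    print('ring radius:',
          0.74 / (2. * numpy.cos(numpy.pi / 2. - example_theta / 2.)))
    # Six hydrogens give an even electron count, hence a singlet.
    print('multiplicity:', example_ring.multiplicity)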
| 36.670968
| 77
| 0.612597
|
2b265b05366ffe8ccf9b9b2383d96614f8d958ed
| 13,258
|
py
|
Python
|
airflow/jobs/local_task_job.py
|
kanga333/airflow
|
7864693e43c40fd8f0914c05f7e196a007d16d50
|
[
"Apache-2.0"
] | null | null | null |
airflow/jobs/local_task_job.py
|
kanga333/airflow
|
7864693e43c40fd8f0914c05f7e196a007d16d50
|
[
"Apache-2.0"
] | null | null | null |
airflow/jobs/local_task_job.py
|
kanga333/airflow
|
7864693e43c40fd8f0914c05f7e196a007d16d50
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import signal
from typing import Optional
import psutil
from sqlalchemy.exc import OperationalError
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.jobs.base_job import BaseJob
from airflow.listeners.events import register_task_instance_state_events
from airflow.listeners.listener import get_listener_manager
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.sentry import Sentry
from airflow.stats import Stats
from airflow.task.task_runner import get_task_runner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import with_row_locks
from airflow.utils.state import State
class LocalTaskJob(BaseJob):
"""LocalTaskJob runs a single task instance."""
__mapper_args__ = {'polymorphic_identity': 'LocalTaskJob'}
def __init__(
self,
task_instance: TaskInstance,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
pickle_id: Optional[str] = None,
pool: Optional[str] = None,
external_executor_id: Optional[str] = None,
*args,
**kwargs,
):
self.task_instance = task_instance
self.dag_id = task_instance.dag_id
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
self.external_executor_id = external_executor_id
        # terminating state is used so that a job doesn't try to
        # terminate multiple times
self.terminating = False
super().__init__(*args, **kwargs)
def _execute(self):
self._enable_task_listeners()
self.task_runner = get_task_runner(self)
def signal_handler(signum, frame):
"""Setting kill signal handler"""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.task_runner.terminate()
self.handle_task_exit(128 + signum)
return
signal.signal(signal.SIGTERM, signal_handler)
if not self.task_instance.check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=self.id,
pool=self.pool,
external_executor_id=self.external_executor_id,
):
self.log.info("Task is not able to be run")
return
try:
self.task_runner.start()
            # Unmap the task _after_ it has forked/execed. (This is a bit of a kludge, but if we unmap before
            # fork, then the "run_raw_task" command will see the mapping index and a non-mapped task and
            # fail)
if self.task_instance.task.is_mapped:
self.task_instance.task = self.task_instance.task.unmap()
heartbeat_time_limit = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# task callback invocation happens either here or in
# self.heartbeat() instead of taskinstance._run_raw_task to
# avoid race conditions
#
# When self.terminating is set to True by heartbeat_callback, this
# loop should not be restarted. Otherwise self.handle_task_exit
# will be invoked and we will end up with duplicated callbacks
while not self.terminating:
# Monitor the task to see if it's done. Wait in a syscall
# (`os.wait`) for as long as possible so we notice the
# subprocess finishing as quick as we can
max_wait_time = max(
0, # Make sure this value is never negative,
min(
(
heartbeat_time_limit
- (timezone.utcnow() - self.latest_heartbeat).total_seconds() * 0.75
),
self.heartrate,
),
)
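                # Illustrative numbers (not defaults): with a 300 s zombie
                # threshold, 100 s since the last heartbeat and a 30 s
                # heartrate, this waits min(300 - 75, 30) = 30 s, leaving
                # headroom to heartbeat well before the zombie limit.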
return_code = self.task_runner.return_code(timeout=max_wait_time)
if return_code is not None:
self.handle_task_exit(return_code)
return
self.heartbeat()
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
# This can only really happen if the worker can't read the DB for a long time
time_since_last_heartbeat = (timezone.utcnow() - self.latest_heartbeat).total_seconds()
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.log.error("Heartbeat time limit exceeded!")
raise AirflowException(
f"Time since last heartbeat({time_since_last_heartbeat:.2f}s) exceeded limit "
f"({heartbeat_time_limit}s)."
)
finally:
self.on_kill()
def handle_task_exit(self, return_code: int) -> None:
"""Handle case where self.task_runner exits by itself or is externally killed"""
        # Without setting this, the heartbeat loop may keep running for a task
        # that has already exited
self.terminating = True
self.log.info("Task exited with return code %s", return_code)
self.task_instance.refresh_from_db()
if self.task_instance.state == State.RUNNING:
# This is for a case where the task received a SIGKILL
# while running or the task runner received a sigterm
self.task_instance.handle_failure(error=None)
# We need to check for error file
# in case it failed due to runtime exception/error
error = None
if self.task_instance.state != State.SUCCESS:
error = self.task_runner.deserialize_run_error()
self.task_instance._run_finished_callback(error=error)
if not self.task_instance.test_mode:
if conf.getboolean('scheduler', 'schedule_after_task_execution', fallback=True):
self._run_mini_scheduler_on_child_tasks()
self._update_dagrun_state_for_paused_dag()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
if not same_hostname:
self.log.warning(
"The recorded hostname %s does not match this instance's hostname %s",
ti.hostname,
fqdn,
)
raise AirflowException("Hostname of job runner does not match")
current_pid = self.task_runner.process.pid
recorded_pid = ti.pid
same_process = recorded_pid == current_pid
if ti.run_as_user or self.task_runner.run_as_user:
recorded_pid = psutil.Process(ti.pid).ppid()
same_process = recorded_pid == current_pid
if recorded_pid is not None and not same_process:
self.log.warning(
"Recorded pid %s does not match the current pid %s", recorded_pid, current_pid
)
raise AirflowException("PID of job runner does not match")
elif self.task_runner.return_code() is None and hasattr(self.task_runner, 'process'):
if ti.state == State.SKIPPED:
# A DagRun timeout will cause tasks to be externally marked as skipped.
dagrun = ti.get_dagrun(session=session)
execution_time = (dagrun.end_date or timezone.utcnow()) - dagrun.start_date
dagrun_timeout = ti.task.dag.dagrun_timeout
if dagrun_timeout and execution_time > dagrun_timeout:
self.log.warning("DagRun timed out after %s.", str(execution_time))
self.log.warning(
"State of this instance has been externally set to %s. Terminating instance.", ti.state
)
self.task_runner.terminate()
if ti.state == State.SUCCESS:
error = None
else:
# if ti.state is not set by taskinstance.handle_failure, then
# error file will not be populated and it must be updated by
# external source such as web UI
error = self.task_runner.deserialize_run_error() or "task marked as failed externally"
ti._run_finished_callback(error=error)
self.terminating = True
@provide_session
@Sentry.enrich_errors
def _run_mini_scheduler_on_child_tasks(self, session=None) -> None:
try:
# Re-select the row with a lock
dag_run = with_row_locks(
session.query(DagRun).filter_by(
dag_id=self.dag_id,
run_id=self.task_instance.run_id,
),
session=session,
).one()
task = self.task_instance.task
assert task.dag # For Mypy.
# Get a partial DAG with just the specific tasks we want to examine.
# In order for dep checks to work correctly, we include ourself (so
# TriggerRuleDep can check the state of the task we just executed).
partial_dag = task.dag.partial_subset(
task.downstream_task_ids,
include_downstream=True,
include_upstream=False,
include_direct_upstream=True,
)
dag_run.dag = partial_dag
info = dag_run.task_instance_scheduling_decisions(session)
skippable_task_ids = {
task_id for task_id in partial_dag.task_ids if task_id not in task.downstream_task_ids
}
schedulable_tis = [ti for ti in info.schedulable_tis if ti.task_id not in skippable_task_ids]
for schedulable_ti in schedulable_tis:
if not hasattr(schedulable_ti, "task"):
schedulable_ti.task = task.dag.get_task(schedulable_ti.task_id)
num = dag_run.schedule_tis(schedulable_tis)
self.log.info("%d downstream tasks scheduled from follow-on schedule check", num)
session.commit()
except OperationalError as e:
# Any kind of DB error here is _non fatal_ as this block is just an optimisation.
self.log.info(
"Skipping mini scheduling run due to exception: %s",
e.statement,
exc_info=True,
)
session.rollback()
@provide_session
def _update_dagrun_state_for_paused_dag(self, session=None):
"""
        Checks for paused dags with DagRuns in the running state and
        updates the DagRun state if possible
"""
dag = self.task_instance.task.dag
if dag.get_is_paused():
dag_run = self.task_instance.get_dagrun(session=session)
if dag_run:
dag_run.dag = dag
dag_run.update_state(session=session, execute_callbacks=True)
@staticmethod
def _enable_task_listeners():
"""
Check if we have any registered listeners, then register sqlalchemy hooks for
TI state change if we do.
"""
if get_listener_manager().has_listeners:
register_task_instance_state_events()
| 42.49359
| 109
| 0.628451
|
92a2e34b4d6f7c5a9b54f1bfb5bae3c7b1a1e3b5
| 1,015
|
py
|
Python
|
cfgov/privacy/views.py
|
chosak/consumerfinance.gov
|
2007ac860ec7dfff0f94517f1a69e75299b97d20
|
[
"CC0-1.0"
] | null | null | null |
cfgov/privacy/views.py
|
chosak/consumerfinance.gov
|
2007ac860ec7dfff0f94517f1a69e75299b97d20
|
[
"CC0-1.0"
] | null | null | null |
cfgov/privacy/views.py
|
chosak/consumerfinance.gov
|
2007ac860ec7dfff0f94517f1a69e75299b97d20
|
[
"CC0-1.0"
] | null | null | null |
from django.urls import reverse_lazy
from django.views.generic import FormView
from privacy.forms import DisclosureConsentForm, RecordsAccessForm
class GetDisclosureConsentForm(FormView):
template_name = 'privacy/disclosure-consent-form.html'
form_class = DisclosureConsentForm
success_url = reverse_lazy('privacy:form_submitted')
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
form.send_email()
return super(GetDisclosureConsentForm, self).form_valid(form)
class GetRecordsAccessForm(FormView):
template_name = 'privacy/records-access-form.html'
form_class = RecordsAccessForm
success_url = reverse_lazy('privacy:form_submitted')
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
form.send_email()
return super(GetRecordsAccessForm, self).form_valid(form)
| 35
| 69
| 0.742857
|
65b806e759da9379dce56808582d41124e893662
| 3,720
|
py
|
Python
|
sdks/python/appcenter_sdk/models/CrashCounts.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | null | null | null |
sdks/python/appcenter_sdk/models/CrashCounts.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 6
|
2019-10-23T06:38:53.000Z
|
2022-01-22T07:57:58.000Z
|
sdks/python/appcenter_sdk/models/CrashCounts.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 2
|
2019-10-23T06:31:05.000Z
|
2021-08-21T17:32:47.000Z
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class CrashCounts(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'count': 'integer',
'crashes': 'array'
}
attribute_map = {
'count': 'count',
'crashes': 'crashes'
}
def __init__(self, count=None, crashes=None): # noqa: E501
"""CrashCounts - a model defined in Swagger""" # noqa: E501
self._count = None
self._crashes = None
self.discriminator = None
if count is not None:
self.count = count
if crashes is not None:
self.crashes = crashes
@property
def count(self):
"""Gets the count of this CrashCounts. # noqa: E501
total crash count # noqa: E501
:return: The count of this CrashCounts. # noqa: E501
:rtype: integer
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this CrashCounts.
total crash count # noqa: E501
:param count: The count of this CrashCounts. # noqa: E501
:type: integer
"""
self._count = count
@property
def crashes(self):
"""Gets the crashes of this CrashCounts. # noqa: E501
the total crash count for day # noqa: E501
:return: The crashes of this CrashCounts. # noqa: E501
:rtype: array
"""
return self._crashes
@crashes.setter
def crashes(self, crashes):
"""Sets the crashes of this CrashCounts.
the total crash count for day # noqa: E501
:param crashes: The crashes of this CrashCounts. # noqa: E501
:type: array
"""
self._crashes = crashes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CrashCounts):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.76259
| 80
| 0.554839
|
a788afd153714f27468fe8e9e4adb2823b940b99
| 8,673
|
py
|
Python
|
spyder/plugins/editor/widgets/tests/test_codeeditor.py
|
mirazakon/spyder
|
c322d46af16a30aba089788a59a4e7db00ffb83f
|
[
"MIT"
] | 3
|
2019-09-27T21:00:00.000Z
|
2021-03-07T23:28:32.000Z
|
spyder/plugins/editor/widgets/tests/test_codeeditor.py
|
mirazakon/spyder
|
c322d46af16a30aba089788a59a4e7db00ffb83f
|
[
"MIT"
] | 3
|
2020-10-13T21:15:23.000Z
|
2020-10-13T21:15:24.000Z
|
spyder/plugins/editor/widgets/tests/test_codeeditor.py
|
mirazakon/spyder
|
c322d46af16a30aba089788a59a4e7db00ffb83f
|
[
"MIT"
] | 2
|
2021-04-30T01:18:22.000Z
|
2021-09-19T06:31:42.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
# Third party imports
from qtpy.QtCore import Qt, QEvent
from qtpy.QtGui import QFont, QTextCursor, QMouseEvent
from pytestqt import qtbot
import pytest
# Local imports
from spyder.plugins.editor.widgets.editor import codeeditor
from spyder.py3compat import PY2, PY3
# --- Fixtures
# -----------------------------------------------------------------------------
@pytest.fixture
def editorbot(qtbot):
widget = codeeditor.CodeEditor(None)
widget.setup_editor(linenumbers=True, markers=True, tab_mode=False,
font=QFont("Courier New", 10),
show_blanks=True, color_scheme='Zenburn',
scroll_past_end=True)
widget.setup_editor(language='Python')
qtbot.addWidget(widget)
widget.show()
return qtbot, widget
# --- Tests
# -----------------------------------------------------------------------------
# testing lowercase transformation functionality
def test_editor_upper_to_lower(editorbot):
qtbot, widget = editorbot
text = 'UPPERCASE'
widget.set_text(text)
cursor = widget.textCursor()
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor)
widget.setTextCursor(cursor)
widget.transform_to_lowercase()
new_text = widget.get_text('sof', 'eof')
assert text != new_text
def test_editor_lower_to_upper(editorbot):
qtbot, widget = editorbot
text = 'uppercase'
widget.set_text(text)
cursor = widget.textCursor()
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor)
widget.setTextCursor(cursor)
widget.transform_to_uppercase()
new_text = widget.get_text('sof', 'eof')
assert text != new_text
@pytest.mark.skipif(PY3, reason='Test only makes sense on Python 2.')
def test_editor_log_lsp_handle_errors(editorbot, capsys):
"""Test the lsp error handling / dialog report Python 2."""
qtbot, widget = editorbot
params = {
'params': {
'activeParameter': 'boo',
'signatures': {
'documentation': b'\x81',
'label': 'foo',
'parameters': {
'boo': {
'documentation': b'\x81',
'label': 'foo',
},
}
}
}
}
widget.process_signatures(params)
captured = capsys.readouterr()
test_1 = "Error when processing signature" in captured.err
test_2 = "codec can't decode byte 0x81" in captured.err
assert test_1 or test_2
@pytest.mark.skipif(PY2, reason="Python 2 strings don't have attached encoding.")
@pytest.mark.parametrize(
"input_text, expected_text, keys, strip_all",
[
("for i in range(2): ",
"for i in range(2): \n \n \n ",
[Qt.Key_Enter, Qt.Key_Enter, ' ', Qt.Key_Enter],
False),
('for i in range(2): ',
'for i in range(2):\n\n ',
[Qt.Key_Enter, Qt.Key_Enter],
True),
('myvar = 2 ',
'myvar = 2\n',
[Qt.Key_Enter],
True),
('somecode = 1\nmyvar = 2 \nmyvar = 3',
'somecode = 1\nmyvar = 2 \nmyvar = 3',
[' ', Qt.Key_Up, Qt.Key_Up],
True),
('somecode = 1\nmyvar = 2 ',
'somecode = 1\nmyvar = 2 ',
[Qt.Key_Left],
True),
('"""This is a string with important spaces\n ',
'"""This is a string with important spaces\n \n',
[Qt.Key_Enter],
True),
('"""string ',
'"""string \n',
[Qt.Key_Enter],
True),
('somecode = 1\nmyvar = 2',
'somecode = 1\nmyvar = 2',
[' ', (Qt.LeftButton, 0)],
True),
('somecode = 1\nmyvar = 2',
'somecode = 1\nmyvar = 2 ',
[' ', (Qt.LeftButton, 23)],
True),
('a=1\na=2 \na=3',
'a=1\na=2 \na=3',
[(Qt.LeftButton, 6), Qt.Key_Up],
True),
('def fun():\n """fun',
'def fun():\n """fun\n\n ',
[Qt.Key_Enter, Qt.Key_Enter],
True),
('def fun():\n """fun',
'def fun():\n """fun\n \n ',
[Qt.Key_Enter, Qt.Key_Enter],
False),
("('🚫')",
"('🚫')\n",
[Qt.Key_Enter],
True),
("def fun():",
"def fun():\n\n ",
[Qt.Key_Enter, Qt.Key_Enter],
True),
("def fun():",
"def fun():\n\n\n",
[Qt.Key_Enter, Qt.Key_Enter, Qt.Key_Enter],
True),
("def fun():\n i = 0\n# no indent",
"def fun():\n i = 0\n# no indent\n",
[Qt.Key_Enter],
True),
("if a:\n def b():\n i = 1",
"if a:\n def b():\n i = 1\n\n ",
[Qt.Key_Enter, Qt.Key_Enter, Qt.Key_Backspace],
True),
])
def test_editor_rstrip_keypress(editorbot, input_text, expected_text, keys,
strip_all):
"""
Test that whitespace is removed when leaving a line.
"""
qtbot, widget = editorbot
widget.strip_trailing_spaces_on_modify = strip_all
widget.set_text(input_text)
cursor = widget.textCursor()
cursor.movePosition(QTextCursor.End)
widget.setTextCursor(cursor)
for key in keys:
if isinstance(key, tuple):
# Mouse event
button, position = key
cursor = widget.textCursor()
cursor.setPosition(position)
xypos = widget.cursorRect(cursor).center()
widget.mousePressEvent(QMouseEvent(
QEvent.MouseButtonPress, xypos,
button, button,
Qt.NoModifier))
else:
qtbot.keyPress(widget, key)
assert widget.toPlainText() == expected_text
@pytest.mark.parametrize(
"input_text, expected_state", [
("'string ", [True, False]),
('"string ', [True, False]),
("'string \\", [True, True]),
('"string \\', [True, True]),
("'string \\ ", [True, False]),
('"string \\ ', [True, False]),
("'string ' ", [False, False]),
('"string " ', [False, False]),
("'string \"", [True, False]),
('"string \'', [True, False]),
("'string \" ", [True, False]),
('"string \' ', [True, False]),
("'''string ", [True, True]),
('"""string ', [True, True]),
("'''string \\", [True, True]),
('"""string \\', [True, True]),
("'''string \\ ", [True, True]),
('"""string \\ ', [True, True]),
("'''string ''' ", [False, False]),
('"""string """ ', [False, False]),
("'''string \"\"\"", [True, True]),
('"""string \'\'\'', [True, True]),
("'''string \"\"\" ", [True, True]),
('"""string \'\'\' ', [True, True]),
])
def test_in_string(editorbot, input_text, expected_state):
"""
Test that in_string works correctly.
"""
qtbot, widget = editorbot
widget.set_text(input_text + '\n ')
cursor = widget.textCursor()
for blanks_enabled in [True, False]:
widget.set_blanks_enabled(blanks_enabled)
cursor.setPosition(len(input_text))
assert cursor.position() == len(input_text)
assert widget.in_string(cursor) == expected_state[0]
cursor.setPosition(len(input_text) + 3)
assert widget.in_string(cursor) == expected_state[1]
@pytest.mark.skipif(PY2, reason="Doesn't work with python 2 on travis.")
def test_comment(editorbot):
"""
    Test that toggle_comment works correctly.
"""
qtbot, widget = editorbot
widget.set_text("import numpy")
cursor = widget.textCursor()
cursor.setPosition(8)
cursor.setPosition(11, QTextCursor.KeepAnchor)
widget.setTextCursor(cursor)
widget.toggle_comment()
assert widget.toPlainText() == "# import numpy"
widget.toggle_comment()
assert widget.toPlainText() == "import numpy"
def test_undo_return(editorbot):
"""Test that we can undo a return."""
qtbot, editor = editorbot
text = "if True:\n 0"
returned_text = "if True:\n 0\n "
editor.set_text(text)
cursor = editor.textCursor()
cursor.setPosition(14)
editor.setTextCursor(cursor)
qtbot.keyPress(editor, Qt.Key_Return)
assert editor.toPlainText() == returned_text
qtbot.keyPress(editor, "z", modifier=Qt.ControlModifier)
assert editor.toPlainText() == text
if __name__ == '__main__':
pytest.main(['test_codeeditor.py'])
| 32.241636
| 81
| 0.536608
|
5d9de42d00130c0a9b14f4a73eccefb245e96eef
| 2,969
|
py
|
Python
|
dataloaders/data_writer_seq.py
|
zhxtu/ours_video
|
2762501e4d3795872ffabc49fa3c73fdde10af8b
|
[
"MIT"
] | null | null | null |
dataloaders/data_writer_seq.py
|
zhxtu/ours_video
|
2762501e4d3795872ffabc49fa3c73fdde10af8b
|
[
"MIT"
] | null | null | null |
dataloaders/data_writer_seq.py
|
zhxtu/ours_video
|
2762501e4d3795872ffabc49fa3c73fdde10af8b
|
[
"MIT"
] | null | null | null |
import os
from glob import glob
import random
# import numpy as np
image_root= '/hdd2/zy/Dataset/ThermalData/'
train_image_list=glob(os.path.join(image_root, 'Img8bit', 'train', '*', '*.bmp'))
# valinfo=open('/hdd2/zy/Dataset/ThermalData/val.txt','w')
# val_image_list=glob(os.path.join(image_root, 'Img8bit', 'val', '*', '*.bmp'))
# for i in range(len(val_image_list)):
# imgpath=val_image_list[i]
# imgpath='/'+imgpath[imgpath.index('Img8bit'):]
# vid=imgpath.split('/')[-2]
# name=imgpath.split('/')[-1]
# gtname=name.split('_')[0]+'_'+name.split('_')[1]+'_'+name.split('_')[2]+'_gtLabelIds.png'
# gtpath=os.path.join('/gtFine', 'val', vid, gtname)
# valinfo.write(imgpath)
# valinfo.write('\t')
# valinfo.write(gtpath)
# valinfo.write('\r\n')
#
# testinfo=open('/hdd2/zy/Dataset/ThermalData/test.txt','w')
# test_image_list=glob(os.path.join(image_root, 'Img8bit', 'test', '*', '*.bmp'))
# for i in range(len(test_image_list)):
# imgpath=test_image_list[i]
# imgpath = '/' + imgpath[imgpath.index('Img8bit'):]
# vid=imgpath.split('/')[-2]
# name=imgpath.split('/')[-1]
# gtname=name.split('_')[0]+'_'+name.split('_')[1]+'_'+name.split('_')[2]+'_gtLabelIds.png'
# gtpath=os.path.join('/gtFine', 'test', vid, gtname)
# testinfo.write(imgpath)
# testinfo.write('\t')
# testinfo.write(gtpath)
# testinfo.write('\r\n')
subnum=1
l = open('./thermalseq_splits1/1374_train_unsupervised.txt','w')
u = open('./thermalseq_splits1/1-{}_train_unsupervised.txt'.format(2*subnum),'w')
for i in range(len(train_image_list)):
imgpath = train_image_list[i]
imgpath = '/' + imgpath[imgpath.index('Img8bit'):]
vid = imgpath.split('/')[-2]
vid_info = imgpath.split('/')[-1].split('_')
city, seq, cur_frame = vid_info[0], vid_info[1], vid_info[2]
f4_id = int(cur_frame)
f4_path = os.path.join('/IR_sequence', 'train', 'DJI_' + seq, ("%s_%s_%06d.bmp" % (city, seq, f4_id)))
# f4_path = os.path.join('/IR_sequence', 'train', 'DJI_' + seq, ("%s_%s_%06d_IRframes.bmp" % (city, seq, f4_id)))
gt_path=os.path.join('/gtFine', 'train', 'DJI_' + seq, ("%s_%s_%06d_gtLabelIds.png" % (city, seq, f4_id)))
l.write(f4_path)
l.write('\t')
l.write(gt_path)
l.write('\r\n')
for j in range(subnum):
f3_id = f4_id - (j+1)*random.randint(1, 2)
f5_id = f4_id + (j+1)*random.randint(1, 2)
f3_path = os.path.join('/IR_sequence', 'train', 'DJI_' + seq, ("%s_%s_%06d.bmp" % (city, seq, f3_id)))
f5_path = os.path.join('/IR_sequence', 'train', 'DJI_' + seq, ("%s_%s_%06d.bmp" % (city, seq, f5_id)))
# f3_path = os.path.join('/IR_sequence', 'train', 'DJI_' + seq, ("%s_%s_%06d_IRframes.bmp" % (city, seq, f3_id)))
# f5_path = os.path.join('/IR_sequence', 'train', 'DJI_' + seq, ("%s_%s_%06d_IRframes.bmp" % (city, seq, f5_id)))
u.write(f3_path)
u.write('\r\n')
u.write(f5_path)
u.write('\r\n')
| 45.676923
| 121
| 0.607949
|
b069b3750b0ad84cc2ef7484fb6b0714c154e66d
| 15,386
|
py
|
Python
|
lib/pygments/lexers/shell.py
|
neelie/sublime-evernote
|
05d82a919cab4b01f5a10c0566b8b487a149b443
|
[
"MIT"
] | null | null | null |
lib/pygments/lexers/shell.py
|
neelie/sublime-evernote
|
05d82a919cab4b01f5a10c0566b8b487a149b443
|
[
"MIT"
] | null | null | null |
lib/pygments/lexers/shell.py
|
neelie/sublime-evernote
|
05d82a919cab4b01f5a10c0566b8b487a149b443
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pygments.lexers.shell
~~~~~~~~~~~~~~~~~~~~~
Lexers for various shells.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, include
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
'PowerShellLexer', 'ShellSessionLexer']
line_re = re.compile('.*?\n')
class BashLexer(RegexLexer):
"""
Lexer for (ba|k|)sh shell scripts.
.. versionadded:: 0.6
"""
name = 'Bash'
aliases = ['bash', 'sh', 'ksh']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
'.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)(\s*)\b',
bygroups(Keyword, Text)),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<<', Operator), # here-string
(r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r';', Punctuation),
(r'&', Punctuation),
(r'\|', Punctuation),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
if shebang_matches(text, r'(ba|z|)sh'):
return 1
if text.startswith('$ '):
return 0.2
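# Minimal usage sketch (not part of the upstream lexer): a tiny helper showing
# how BashLexer tokens can be consumed.  Only the public Lexer.get_tokens() API
# is assumed; the sample command string is made up.
def _example_bash_tokens(code='echo "hello" $HOME  # greet'):
    """Return (token_type, value) pairs for a small shell snippet."""
    return list(BashLexer().get_tokens(code))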
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
.. versionadded:: 1.1
"""
name = 'Bash Session'
aliases = ['console']
filenames = ['*.sh-session']
mimetypes = ['application/x-shell-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\(\S+\))?(?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)'
r'?|\[\S+[@:][^\n]+\].+)[$#%])(.*\n?)' , line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
elif line.startswith('>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1])]))
curcode += line[1:]
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class ShellSessionLexer(Lexer):
"""
Lexer for shell sessions that works with different command prompts
.. versionadded:: 1.6
"""
name = 'Shell Session'
aliases = ['shell-session']
filenames = ['*.shell-session']
mimetypes = ['application/x-sh-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\[?\S+@[^$#%]+\]?\s*)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
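# Minimal sketch (not part of the upstream module): the session lexer splits the
# prompt from the command and delegates the command text to BashLexer, so the
# prompt comes back tagged as Generic.Prompt.  The sample prompt is made up.
def _example_shell_session_tokens():
    text = "user@host:~$ echo hi\nhi\n"
    return list(ShellSessionLexer().get_tokens(text))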
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
.. versionadded:: 0.7
"""
name = 'Batchfile'
aliases = ['bat', 'batch', 'dosbatch', 'winbatch']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Lines can start with @ to prevent echo
(r'^\s*@', Punctuation),
(r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
# If made more specific, make sure you still allow expansions
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
(r'\b(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
(r'\b(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
(r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
include('basic'),
(r'.', Text),
],
'echo': [
# Escapes only valid within echo args?
(r'\^\^|\^<|\^>|\^\|', String.Escape),
(r'\n', Text, '#pop'),
include('basic'),
(r'[^\'"^]+', Text),
],
'basic': [
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
(r'`.*?`', String.Backtick),
(r'-?\d+', Number),
(r',', Punctuation),
(r'=', Operator),
(r'/\S+', Name),
(r':\w+', Name.Label),
(r'\w:\w+', Text),
(r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
],
}
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
.. versionadded:: 0.10
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
r'source|stop|suspend|source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class PowerShellLexer(RegexLexer):
"""
For Windows PowerShell code.
.. versionadded:: 1.5
"""
name = 'PowerShell'
aliases = ['powershell', 'posh', 'ps1', 'psm1']
filenames = ['*.ps1','*.psm1']
mimetypes = ['text/x-powershell']
flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
keywords = (
'while validateset validaterange validatepattern validatelength '
'validatecount until trap switch return ref process param parameter in '
'if global: function foreach for finally filter end elseif else '
'dynamicparam do default continue cmdletbinding break begin alias \\? '
'% #script #private #local #global mandatory parametersetname position '
'valuefrompipeline valuefrompipelinebypropertyname '
'valuefromremainingarguments helpmessage try catch throw').split()
operators = (
'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
'lt match ne not notcontains notlike notmatch or regex replace '
'wildcard').split()
verbs = (
'write where wait use update unregister undo trace test tee take '
'suspend stop start split sort skip show set send select scroll resume '
'restore restart resolve resize reset rename remove register receive '
'read push pop ping out new move measure limit join invoke import '
'group get format foreach export expand exit enter enable disconnect '
'disable debug cxnew copy convertto convertfrom convert connect '
'complete compare clear checkpoint aggregate add').split()
commenthelp = (
'component description example externalhelp forwardhelpcategory '
'forwardhelptargetname functionality inputs link '
'notes outputs parameter remotehelprunspace role synopsis').split()
tokens = {
'root': [
# we need to count pairs of parentheses for correct highlight
# of '$(...)' blocks in strings
(r'\(', Punctuation, 'child'),
(r'\s+', Text),
(r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
bygroups(Comment, String.Doc, Comment)),
(r'#[^\n]*?$', Comment),
            (r'(<|&lt;)#', Comment.Multiline, 'multline'),
(r'@"\n', String.Heredoc, 'heredoc-double'),
(r"@'\n.*?\n'@", String.Heredoc),
# escaped syntax
(r'`[\'"$@-]', Punctuation),
(r'"', String.Double, 'string'),
(r"'([^']|'')*'", String.Single),
(r'(\$|@@|@)((global|script|private|env):)?[a-z0-9_]+',
Name.Variable),
(r'(%s)\b' % '|'.join(keywords), Keyword),
(r'-(%s)\b' % '|'.join(operators), Operator),
(r'(%s)-[a-z_][a-z0-9_]*\b' % '|'.join(verbs), Name.Builtin),
(r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s
(r'-[a-z_][a-z0-9_]*', Name),
(r'\w+', Name),
(r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
],
'child': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'multline': [
(r'[^#&.]+', Comment.Multiline),
            (r'#(>|&gt;)', Comment.Multiline, '#pop'),
(r'\.(%s)' % '|'.join(commenthelp), String.Doc),
(r'[#&.]', Comment.Multiline),
],
'string': [
(r"`[0abfnrtv'\"\$`]", String.Escape),
(r'[^$`"]+', String.Double),
(r'\$\(', Punctuation, 'child'),
(r'""', String.Double),
(r'[`$]', String.Double),
(r'"', String.Double, '#pop'),
],
'heredoc-double': [
(r'\n"@', String.Heredoc, '#pop'),
(r'\$\(', Punctuation, 'child'),
(r'[^@\n]+"]', String.Heredoc),
(r".", String.Heredoc),
]
}
| 36.202353
| 84
| 0.472962
|
e5ab6f8b395707df749ca2fb722820e46a690f9f
| 418
|
py
|
Python
|
airbyte-integrations/connectors/source-amazon-ads/source_amazon_ads/constants.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 6,215
|
2020-09-21T13:45:56.000Z
|
2022-03-31T21:21:45.000Z
|
airbyte-integrations/connectors/source-amazon-ads/source_amazon_ads/constants.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 8,448
|
2020-09-21T00:43:50.000Z
|
2022-03-31T23:56:06.000Z
|
airbyte-integrations/connectors/source-amazon-ads/source_amazon_ads/constants.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 1,251
|
2020-09-20T05:48:47.000Z
|
2022-03-31T10:41:29.000Z
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from enum import Enum
class AmazonAdsRegion(str, Enum):
NA = "NA"
EU = "EU"
FE = "FE"
SANDBOX = "SANDBOX"
URL_MAPPING = {
"NA": "https://advertising-api.amazon.com/",
"EU": "https://advertising-api-eu.amazon.com/",
"FE": "https://advertising-api-fe.amazon.com/",
"SANDBOX": "https://advertising-api-test.amazon.com/",
}
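# Minimal sketch (not part of the connector source): resolving the advertising
# API host for a configured region, using only the enum and mapping above.  The
# helper name is an assumption for illustration.
def endpoint_for(region: AmazonAdsRegion) -> str:
    """Return the base URL that corresponds to the given region."""
    return URL_MAPPING[region.value]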
| 19.904762
| 58
| 0.61244
|
d67cea65b0eec34217058de9fda3352cf77f207f
| 9,523
|
py
|
Python
|
neutron/conf/common.py
|
kklimonda/neutron
|
ccdddad358a4bf802d59b3fbbfe88a1e9881c96c
|
[
"Apache-2.0"
] | null | null | null |
neutron/conf/common.py
|
kklimonda/neutron
|
ccdddad358a4bf802d59b3fbbfe88a1e9881c96c
|
[
"Apache-2.0"
] | null | null | null |
neutron/conf/common.py
|
kklimonda/neutron
|
ccdddad358a4bf802d59b3fbbfe88a1e9881c96c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.utils import net
from oslo_config import cfg
from oslo_service import wsgi
from neutron._i18n import _
from neutron.common import constants
core_opts = [
cfg.HostAddressOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to.")),
cfg.PortOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions. "
"Note that this can be a colon-separated list of paths. "
"For example: api_extensions_path = "
"extensions:/path/to/more/exts:/even/more/exts. "
"The __path__ of neutron.extensions is appended to "
"this, so if your extensions are in there you don't "
"need to specify them here.")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs. "
"The first 3 octets will remain unchanged. If the 4th "
"octet is not 00, it will also be used. The others "
"will be randomly generated.")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.ListOpt('default_availability_zones', default=[],
help=_("Default value of availability zone hints. The "
"availability zone aware schedulers use this when "
"the resources availability_zone_hints is empty. "
"Multiple availability zones can be specified by a "
"comma separated string. This value can be empty. "
"In this case, even if availability_zone_hints for "
"a resource is empty, availability zone is "
"considered for high availability while scheduling "
"the resource.")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers per subnet")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.BoolOpt('ipv6_pd_enabled', default=False,
help=_("Enables IPv6 Prefix Delegation for automatic subnet "
"CIDR allocation. "
"Set to True to enable IPv6 Prefix Delegation for "
"subnet allocation in a PD-capable environment. Users "
"making subnet creation requests for IPv6 subnets "
"without providing a CIDR or subnetpool ID will be "
"given a CIDR via the Prefix Delegation mechanism. "
"Note that enabling PD will override the behavior of "
"the default IPv6 subnetpool.")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.StrOpt('dns_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
cfg.StrOpt('external_dns_driver',
help=_('Driver for external DNS integration.')),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron. "
"Attention: the following parameter MUST be set to "
"False if Neutron is being used in conjunction with "
"Nova security groups.")),
cfg.HostAddressOpt('host', default=net.get_hostname(),
sample_default='example.domain',
help=_("Hostname to be used by the Neutron server, "
"agents and services running on this machine. "
"All the agents and services running on this "
"machine must use the same host value.")),
cfg.StrOpt("network_link_prefix",
help=_("This string is prepended to the normal URL that is "
"returned in links to the OpenStack Network API. If it "
"is empty (the default), the URLs are returned "
"unchanged.")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
cfg.StrOpt('ipam_driver', default='internal',
help=_("Neutron IPAM (IP address management) driver to use. "
"By default, the reference implementation of the "
"Neutron IPAM driver is used.")),
cfg.BoolOpt('vlan_transparent', default=False,
help=_('If True, then allow plugins that support it to '
'create VLAN transparent networks.')),
cfg.StrOpt('web_framework', default='pecan',
deprecated_for_removal=True,
choices=('legacy', 'pecan'),
help=_("This will choose the web framework in which to run "
"the Neutron API server. 'pecan' is a new "
"rewrite of the API routing components.")),
cfg.IntOpt('global_physnet_mtu', default=constants.DEFAULT_NETWORK_MTU,
deprecated_name='segment_mtu', deprecated_group='ml2',
help=_('MTU of the underlying physical network. Neutron uses '
'this value to calculate MTU for all virtual network '
'components. For flat and VLAN networks, neutron uses '
'this value without modification. For overlay networks '
'such as VXLAN, neutron automatically subtracts the '
'overlay protocol overhead from this value. Defaults '
'to 1500, the standard value for Ethernet.'))
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
def register_core_common_config_opts(cfg=cfg.CONF):
cfg.register_opts(core_opts)
cfg.register_cli_opts(core_cli_opts)
wsgi.register_opts(cfg)
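# Minimal sketch (not part of the Neutron source): registering the core options
# on a throwaway ConfigOpts instance and reading the defaults back.  The helper
# name and the empty argv are assumptions for illustration only.
def _example_core_defaults():
    conf = cfg.ConfigOpts()
    conf.register_opts(core_opts)
    conf(args=[])  # parse an empty command line so defaults are populated
    return conf.bind_host, conf.bind_port  # ('0.0.0.0', 9696) unless overridden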
NOVA_CONF_SECTION = 'nova'
nova_opts = [
cfg.StrOpt('region_name',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
cfg.StrOpt('endpoint_type',
default='public',
choices=['public', 'admin', 'internal'],
help=_('Type of the nova endpoint to use. This endpoint will'
' be looked up in the keystone catalog and should be'
' one of public, internal or admin.')),
]
def register_nova_opts(cfg=cfg.CONF):
cfg.register_opts(nova_opts, group=NOVA_CONF_SECTION)
PLACEMENT_CONF_SECTION = 'placement'
placement_opts = [
cfg.StrOpt('region_name',
help=_('Name of placement region to use. Useful if keystone '
'manages more than one region.')),
cfg.StrOpt('endpoint_type',
default='public',
choices=['public', 'admin', 'internal'],
help=_('Type of the placement endpoint to use. This endpoint '
'will be looked up in the keystone catalog and should '
'be one of public, internal or admin.')),
]
def register_placement_opts(cfg=cfg.CONF):
cfg.register_opts(placement_opts, group=PLACEMENT_CONF_SECTION)
| 50.925134
| 79
| 0.587735
|
6df59ee8696978d160f0003c99eee4bd7d579b4c
| 7,286
|
py
|
Python
|
core/main.py
|
cmdrcryme/robophiz
|
c9db8816f323a5071e72d5b68c2c24534f7f76f5
|
[
"Unlicense"
] | 2
|
2016-10-22T20:45:57.000Z
|
2017-08-06T20:35:43.000Z
|
core/main.py
|
cmdrcryme/robophiz
|
c9db8816f323a5071e72d5b68c2c24534f7f76f5
|
[
"Unlicense"
] | 9
|
2015-01-12T21:33:24.000Z
|
2018-04-02T15:59:22.000Z
|
core/main.py
|
cmdrcryme/robophiz
|
c9db8816f323a5071e72d5b68c2c24534f7f76f5
|
[
"Unlicense"
] | 1
|
2017-08-01T22:13:33.000Z
|
2017-08-01T22:13:33.000Z
|
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import re
import _thread
import traceback
from queue import Queue
from future.builtins import str
_thread.stack_size(1024 * 512) # reduce vm size
class Input(dict):
def __init__(self, conn, raw, prefix, command, params,
nick, user, host, paraml, msg):
server = conn.server_host
chan = paraml[0].lower()
if chan == conn.nick.lower(): # is a PM
chan = nick
def say(msg):
conn.msg(chan, msg)
def reply(msg):
if chan == nick: # PMs don't need prefixes
self.say(msg)
else:
self.say(nick + ': ' + msg)
def pm(msg, nick=nick):
conn.msg(nick, msg)
def set_nick(nick):
conn.set_nick(nick)
def me(msg):
self.say("\x01%s %s\x01" % ("ACTION", msg))
def notice(msg):
conn.cmd('NOTICE', [nick, msg])
def kick(target=None, reason=None):
conn.cmd('KICK', [chan, target or nick, reason or ''])
def ban(target=None):
conn.cmd('MODE', [chan, '+b', target or host])
def unban(target=None):
conn.cmd('MODE', [chan, '-b', target or host])
dict.__init__(self, conn=conn, raw=raw, prefix=prefix, command=command,
params=params, nick=nick, user=user, host=host,
paraml=paraml, msg=msg, server=server, chan=chan,
notice=notice, say=say, reply=reply, pm=pm, bot=bot,
kick=kick, ban=ban, unban=unban, me=me,
set_nick=set_nick, lastparam=paraml[-1])
# make dict keys accessible as attributes
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def run(func, input):
args = func._args
if 'inp' not in input:
input.inp = input.paraml
if args:
if 'db' in args and 'db' not in input:
input.db = get_db_connection(input.conn)
if 'input' in args:
input.input = input
if 0 in args:
out = func(input.inp, **input)
else:
kw = dict((key, input[key]) for key in args if key in input)
out = func(input.inp, **kw)
else:
out = func(input.inp)
if out is not None:
input.reply(str(out))
def do_sieve(sieve, bot, input, func, type, args):
try:
return sieve(bot, input, func, type, args)
except Exception:
print('sieve error', end=' ')
traceback.print_exc()
return None
class Handler(object):
'''Runs plugins in their own threads (ensures order)'''
def __init__(self, func):
self.func = func
self.input_queue = Queue()
_thread.start_new_thread(self.start, ())
def start(self):
uses_db = 'db' in self.func._args
db_conns = {}
while True:
input = self.input_queue.get()
if input == StopIteration:
break
if uses_db:
db = db_conns.get(input.conn)
if db is None:
db = bot.get_db_connection(input.conn)
db_conns[input.conn] = db
input.db = db
try:
run(self.func, input)
except:
traceback.print_exc()
def stop(self):
self.input_queue.put(StopIteration)
def put(self, value):
self.input_queue.put(value)
def dispatch(input, kind, func, args, autohelp=False):
for sieve, in bot.plugs['sieve']:
input = do_sieve(sieve, bot, input, func, kind, args)
if input == None:
return
if autohelp and args.get('autohelp', True) and not input.inp \
and func.__doc__ is not None:
input.reply(func.__doc__)
return
if hasattr(func, '_apikeys'):
bot_keys = bot.config.get('api_keys', {})
keys = {key: bot_keys.get(key) for key in func._apikeys}
missing = [keyname for keyname, value in keys.items() if value is None]
if missing:
input.reply('error: missing api keys - {}'.format(missing))
return
# Return a single key as just the value, and multiple keys as a dict.
if len(keys) == 1:
input.api_key = list(keys.values())[0]
else:
input.api_key = keys
if func._thread:
bot.threads[func].put(input)
else:
_thread.start_new_thread(run, (func, input))
def match_command(command):
commands = list(bot.commands)
# do some fuzzy matching
prefix = [x for x in commands if x.startswith(command)]
if len(prefix) == 1:
return prefix[0]
elif prefix and command not in prefix:
return prefix
return command
def make_command_re(bot_prefix, is_private, bot_nick):
if not isinstance(bot_prefix, list):
bot_prefix = [bot_prefix]
if is_private:
bot_prefix.append('') # empty prefix
bot_prefix = '|'.join(re.escape(p) for p in bot_prefix)
bot_prefix += '|' + bot_nick + r'[:,]+\s+'
command_re = r'(?:%s)(\w+)(?:$|\s+)(.*)' % bot_prefix
return re.compile(command_re)
def test_make_command_re():
match = make_command_re('.', False, 'bot').match
assert not match('foo')
assert not match('bot foo')
for _ in range(2):
assert match('.test').groups() == ('test', '')
assert match('bot: foo args').groups() == ('foo', 'args')
match = make_command_re('.', True, 'bot').match
assert match('foo').groups() == ('foo', '')
match = make_command_re(['.', '!'], False, 'bot').match
assert match('!foo args').groups() == ('foo', 'args')
def main(conn, out):
inp = Input(conn, *out)
# EVENTS
for func, args in bot.events[inp.command] + bot.events['*']:
dispatch(Input(conn, *out), "event", func, args)
if inp.command == 'PRIVMSG':
# COMMANDS
config_prefix = bot.config.get("prefix", ".")
is_private = inp.chan == inp.nick # no prefix required
command_re = make_command_re(config_prefix, is_private, inp.conn.nick)
m = command_re.match(inp.lastparam)
if m:
trigger = m.group(1).lower()
command = match_command(trigger)
if isinstance(command, list): # multiple potential matches
input = Input(conn, *out)
input.reply("did you mean %s or %s?" %
(', '.join(command[:-1]), command[-1]))
elif command in bot.commands:
input = Input(conn, *out)
input.trigger = trigger
input.inp_unstripped = m.group(2)
input.inp = input.inp_unstripped.strip()
func, args = bot.commands[command]
dispatch(input, "command", func, args, autohelp=True)
# REGEXES
for func, args in bot.plugs['regex']:
m = args['re'].search(inp.lastparam)
if m:
input = Input(conn, *out)
input.inp = m
dispatch(input, "regex", func, args)
| 29.738776
| 79
| 0.551194
|
d228d237498a27006e3d521dbba68191ca8f0154
| 3,653
|
py
|
Python
|
backend/api/api/settings/base.py
|
lanoir42/makefeeconverge
|
61456d581a415257ebeb6b1d102820a164ded913
|
[
"MIT"
] | null | null | null |
backend/api/api/settings/base.py
|
lanoir42/makefeeconverge
|
61456d581a415257ebeb6b1d102820a164ded913
|
[
"MIT"
] | 5
|
2022-03-04T14:53:43.000Z
|
2022-03-21T00:00:28.000Z
|
backend/api/api/settings/base.py
|
lanoir42/makefeeconverge
|
61456d581a415257ebeb6b1d102820a164ded913
|
[
"MIT"
] | null | null | null |
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
from decouple import Config, RepositoryEnv, config
if "DOTENV_FILE" in os.environ:
DOTENV_FILE = os.getenv("DOTENV_FILE", ".env")
print(f"LOADED DOTENV_FILE : {DOTENV_FILE}")
config = Config(RepositoryEnv(DOTENV_FILE))
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "api.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "api.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": config("PG_DATABASE"),
"USER": config("PG_USER"),
"PASSWORD": config("PG_PASSWORD"),
"HOST": config("PG_HOST"),
"PORT": config("PG_PORT"),
"TEST": {
"NAME": "test",
},
},
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| 26.471014
| 91
| 0.68738
|
d7bd1acf1376deb1879adda0d40cf20d611d057f
| 739
|
py
|
Python
|
tests/factories.py
|
Anioko/reusable-cms
|
52e2a2f11a92c596bd13812d5fd14dffdcdcaa7f
|
[
"MIT"
] | null | null | null |
tests/factories.py
|
Anioko/reusable-cms
|
52e2a2f11a92c596bd13812d5fd14dffdcdcaa7f
|
[
"MIT"
] | 122
|
2020-12-31T06:31:11.000Z
|
2022-03-18T14:12:03.000Z
|
tests/factories.py
|
Anioko/reusable-cms
|
52e2a2f11a92c596bd13812d5fd14dffdcdcaa7f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from mycms.database import db
from mycms.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
"""Base factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
"""User factory."""
username = Sequence(lambda n: f"user{n}")
email = Sequence(lambda n: f"user{n}@example.com")
password = PostGenerationMethodCall("set_password", "example")
active = True
class Meta:
"""Factory configuration."""
model = User
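# Minimal usage sketch (not part of the original module): with an application
# context active and ``db.session`` bound by the test fixtures, a test can build
# a user like this; the username override is an assumption for illustration.
def make_example_user():
    user = UserFactory(username="alice")
    db.session.commit()  # persist so generated defaults (id, email) are queryable
    return user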
| 23.09375
| 66
| 0.67659
|
1eb972e0bf64dc564da65cb1e397d2e0f96ffb6f
| 2,017
|
py
|
Python
|
e2e/tests/api_tests.py
|
NHSDigital/canary-api
|
21ea101fd6034d24fd0fcd0b5b58a4a9dce0b094
|
[
"MIT"
] | null | null | null |
e2e/tests/api_tests.py
|
NHSDigital/canary-api
|
21ea101fd6034d24fd0fcd0b5b58a4a9dce0b094
|
[
"MIT"
] | 94
|
2020-12-15T10:32:59.000Z
|
2022-03-29T13:08:29.000Z
|
e2e/tests/api_tests.py
|
NHSDigital/canary-api
|
21ea101fd6034d24fd0fcd0b5b58a4a9dce0b094
|
[
"MIT"
] | 1
|
2021-04-11T07:32:07.000Z
|
2021-04-11T07:32:07.000Z
|
import pytest
import os
from aiohttp import ClientResponse
from api_test_utils import env
from api_test_utils import poll_until
from api_test_utils.api_session_client import APISessionClient
from api_test_utils.api_test_session_config import APITestSessionConfig
@pytest.mark.smoketest
@pytest.mark.asyncio
async def test_wait_for_ping(api_client: APISessionClient, api_test_config: APITestSessionConfig):
async def _is_complete(resp: ClientResponse):
if resp.status != 200:
return False
body = await resp.json()
return body.get("commitId") == api_test_config.commit_id
await poll_until(
make_request=lambda: api_client.get('_ping'),
until=_is_complete,
timeout=60
)
@pytest.mark.smoketest
@pytest.mark.asyncio
async def test_check_status_is_secured(api_client: APISessionClient):
async with api_client.get("_status", allow_retries=True) as resp:
assert resp.status == 401
@pytest.mark.smoketest
@pytest.mark.asyncio
async def test_wait_for_status(api_client: APISessionClient, api_test_config: APITestSessionConfig):
async def _is_complete(resp: ClientResponse):
if resp.status != 200:
return False
body = await resp.json()
version_info = body.get('_version')
if not version_info:
return False
return version_info.get("commitId") == api_test_config.commit_id
await poll_until(
make_request=lambda: api_client.get('_status', headers={"apikey": env.status_endpoint_api_key()}),
until=_is_complete,
timeout=60
)
@pytest.mark.smoketest
@pytest.mark.asyncio
async def test_api_status_with_service_header_another_service(api_client: APISessionClient):
resp = await api_client.get("_status", allow_retries=True, max_retries=5, headers={'x-apim-service': 'another-service', "apikey": env.status_endpoint_api_key()})
assert resp.status == 200
body = await resp.json()
assert body.get('service') == 'canary'
| 30.104478
| 165
| 0.728805
|
c0d550f84dc20abb960b19f7e1f08e7280310977
| 5,537
|
py
|
Python
|
torch/fx/experimental/fx_acc/acc_utils.py
|
rraminen/pytorch
|
f7d5d02a1d873ef61b10186225704b2b283a989a
|
[
"Intel"
] | 1
|
2021-10-16T17:44:47.000Z
|
2021-10-16T17:44:47.000Z
|
torch/fx/experimental/fx_acc/acc_utils.py
|
rraminen/pytorch
|
f7d5d02a1d873ef61b10186225704b2b283a989a
|
[
"Intel"
] | null | null | null |
torch/fx/experimental/fx_acc/acc_utils.py
|
rraminen/pytorch
|
f7d5d02a1d873ef61b10186225704b2b283a989a
|
[
"Intel"
] | 1
|
2022-01-19T10:55:49.000Z
|
2022-01-19T10:55:49.000Z
|
import inspect
import json
import os
from typing import Any, Tuple, Callable, Union, Dict, List, Optional
import re
import torch
import torch.fx
from torch.fx.passes.graph_manipulation import (
serialize_module,
)
from torch.fx.graph_module import GraphModule
from torch.fx.node import _get_qualified_name
from torch.fx.passes import graph_drawer
from torch.fx.passes.shape_prop import TensorMetadata
def is_acc_op(node_or_target: Union[Callable, torch.fx.Node]) -> bool:
"""
    Returns whether `node_or_target` is an acc_op. If it's a node, this checks whether
    its call_function target is from the acc_ops module. Otherwise it's already
the target, which is similarly checked to see if it's from the acc_ops module.
"""
if isinstance(node_or_target, torch.fx.Node):
# All acc_ops are call_functions.
if node_or_target.op != "call_function":
return False
target = node_or_target.target
else:
target = node_or_target
return "acc_ops" in target.__module__
def is_acc_op_with_kwarg(
node_or_target: Union[Callable, torch.fx.Node], kwarg: str
) -> bool:
"""
Helper that inspects `node_or_target` and returns whether it is an acc_op node
(or a target for an acc_op) that has an arg signature that includes `kwarg`.
"""
if not is_acc_op(node_or_target):
return False
target = (
node_or_target.target
if isinstance(node_or_target, torch.fx.Node)
else node_or_target
)
assert not isinstance(target, str)
return kwarg in inspect.signature(inspect.unwrap(target)).parameters
def get_field_from_acc_out_ty(
acc_out_ty_or_dict: Union[Tuple, Dict[str, Any]], field: str
):
"""
    After tracing, NamedTuple inputs are converted to standard tuples, so we cannot
access them by name directly. Use this helper instead.
"""
if isinstance(acc_out_ty_or_dict, dict):
acc_out_ty = acc_out_ty_or_dict["acc_out_ty"]
else:
acc_out_ty = acc_out_ty_or_dict
return acc_out_ty[TensorMetadata._fields.index(field)]
def serialize_module_json_to_file(fx_module: GraphModule, fname: str):
weights: Dict = {}
serialized_json = json.dumps(serialize_module(fx_module, weights), indent=2)
with open(fname, "w") as ofile:
ofile.write(serialized_json)
def build_raw_tensor_meta(
shape=None,
dtype=None,
requires_grad=None,
stride=None,
memory_format=None,
is_quantized=None,
qscheme=None,
q_scale=None,
q_zero_point=None,
):
return TensorMetadata(**locals())
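# Minimal sketch (not part of the original module): round-tripping a field
# through build_raw_tensor_meta and get_field_from_acc_out_ty, which relies on
# the positional field layout of TensorMetadata that this module was written
# against.  The shape/dtype values are arbitrary.
def _example_acc_out_ty_field():
    meta = build_raw_tensor_meta(shape=(2, 3), dtype=torch.float32)
    return get_field_from_acc_out_ty(meta, "dtype")  # -> torch.float32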
def draw_graph(traced: torch.fx.GraphModule, fname: str, figname: str = "fx_graph"):
base, ext = os.path.splitext(fname)
if not ext:
ext = ".svg"
print(f"Writing FX graph to file: {base}{ext}")
g = graph_drawer.FxGraphDrawer(traced, figname)
x = g.get_main_dot_graph()
getattr(x, "write_" + ext.lstrip("."))(fname)
def print_model_info(gm: torch.fx.GraphModule, header: Optional[str] = None):
"""
Print out info of the provided `gm`.
If `header` is provided then it's included in the printed string.
"""
ops_and_counts: Dict[Callable, int] = dict()
    placeholder_count = get_attr_count = call_method_count = call_module_count = 0
    output_count = 0
    for node in gm.graph.nodes:
        if node.op == "call_function":
            ops_and_counts[node.target] = ops_and_counts.get(node.target, 0) + 1
elif node.op == "placeholder":
placeholder_count += 1
elif node.op == "get_attr":
get_attr_count += 1
elif node.op == "call_method":
call_method_count += 1
elif node.op == "call_module":
call_module_count += 1
elif node.op == "output":
output_count = len(node.args[0]) if isinstance(node.args[0], tuple) else 1
else:
raise RuntimeError(f"Unknown node found: {node.format_node()}")
header = "" if header is None else f" [{header}]"
model_info_str = f"Model Info{header}:\n"
model_info_str += f"> placeholder: {placeholder_count}\n"
model_info_str += f"> get_attr: {get_attr_count}\n"
model_info_str += f"> output: {output_count}\n"
if call_module_count != 0:
model_info_str += f"> WARNING: call_module: {call_module_count}"
if call_method_count != 0:
model_info_str += f"> WARNING: call_method: {call_method_count}"
# Sort and print all the other ops. Sort so it's deterministic between runs and
# easier to parse.
pretty_ops_and_counts: List[Tuple[str, int]] = []
for op, count in ops_and_counts.items():
pretty_ops_and_counts.append((_get_qualified_name(op), count))
pretty_ops_and_counts.sort()
for op_str, count in pretty_ops_and_counts:
model_info_str += f"> {op_str}: {count}\n"
print(model_info_str)
def get_unique_attr_name_in_module(mod_traced: torch.fx.GraphModule, name: str) -> str:
"""
    Make sure the name is unique (in a module) and can represent an attr.
"""
# Delete all characters that are illegal in a Python identifier.
name = re.sub("[^0-9a-zA-Z_]+", "_", name)
if name[0].isdigit():
name = f"_{name}"
# Now make sure it is in fact unique to the module by incrementing suffix value.
while hasattr(mod_traced, name):
match = re.match(r"(.*)_(\d+)$", name)
if match is None:
name = name + "_1"
else:
base, num = match.group(1, 2)
name = f"{base}_{int(num) + 1}"
return name
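# Minimal sketch (not part of the original module): deriving a fresh attribute
# name on a freshly traced module.  ``training`` already exists on every
# nn.Module, so the helper appends a numeric suffix.
def _example_unique_attr_name() -> str:
    class _Identity(torch.nn.Module):
        def forward(self, x):
            return x
    traced = torch.fx.symbolic_trace(_Identity())
    return get_unique_attr_name_in_module(traced, "training")  # -> "training_1"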
| 34.60625
| 87
| 0.668774
|
3a4cc5653528cdab1d3e19ad1d2885de35285add
| 4,083
|
py
|
Python
|
test/functional/feature_includeconf.py
|
DiniMuhd7/asusucoin
|
a28c442369571bc0280a84a686e00e7dffd91064
|
[
"MIT"
] | null | null | null |
test/functional/feature_includeconf.py
|
DiniMuhd7/asusucoin
|
a28c442369571bc0280a84a686e00e7dffd91064
|
[
"MIT"
] | null | null | null |
test/functional/feature_includeconf.py
|
DiniMuhd7/asusucoin
|
a28c442369571bc0280a84a686e00e7dffd91064
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
# Create additional config files
# - tmpdir/node0/relative.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
# - tmpdir/node0/relative2.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative2\n")
with open(os.path.join(self.options.tmpdir, "node0", "asusucoin.conf"), "a", encoding='utf8') as f:
f.write("uacomment=main\nincludeconf=relative.conf\n")
def run_test(self):
self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
self.log.info("-includeconf cannot contain invalid arg")
# Commented out as long as we ignore invalid arguments in configuration files
#with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# f.write("foo=bar\n")
#self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo")
self.log.info("-includeconf cannot be invalid path")
os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf")
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "asusucoin.conf"), "a", encoding='utf8') as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| 49.192771
| 224
| 0.694832
|
3762c72cc883b57cb898d81c9aa212f4240fa138
| 19,940
|
py
|
Python
|
sphinx/pycode/parser.py
|
BroadbandForum/sphinx
|
93d6b1bbc67fc588e70bdab51252f7752b810f11
|
[
"BSD-2-Clause"
] | 1
|
2021-06-29T13:05:19.000Z
|
2021-06-29T13:05:19.000Z
|
sphinx/pycode/parser.py
|
BroadbandForum/sphinx
|
93d6b1bbc67fc588e70bdab51252f7752b810f11
|
[
"BSD-2-Clause"
] | 2
|
2022-02-13T19:49:39.000Z
|
2022-03-02T09:52:35.000Z
|
sphinx/pycode/parser.py
|
hixio-mh/sphinx
|
ba1450b25ea38fad42058d6a677201179002b9a2
|
[
"BSD-2-Clause"
] | null | null | null |
"""
sphinx.pycode.parser
~~~~~~~~~~~~~~~~~~~~
Utilities parsing and analyzing Python code.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import inspect
import itertools
import re
import sys
import tokenize
from token import NAME, NEWLINE, INDENT, DEDENT, NUMBER, OP, STRING
from tokenize import COMMENT, NL
from typing import Any, Dict, List, Optional, Tuple
from sphinx.pycode.ast import ast # for py37 or older
from sphinx.pycode.ast import parse, unparse
comment_re = re.compile('^\\s*#: ?(.*)\r?\n?$')
indent_re = re.compile('^\\s*$')
emptyline_re = re.compile('^\\s*(#.*)?$')
if sys.version_info >= (3, 6):
ASSIGN_NODES = (ast.Assign, ast.AnnAssign)
else:
ASSIGN_NODES = (ast.Assign)
def filter_whitespace(code: str) -> str:
return code.replace('\f', ' ') # replace FF (form feed) with whitespace
def get_assign_targets(node: ast.AST) -> List[ast.expr]:
"""Get list of targets from Assign and AnnAssign node."""
if isinstance(node, ast.Assign):
return node.targets
else:
return [node.target] # type: ignore
def get_lvar_names(node: ast.AST, self: ast.arg = None) -> List[str]:
"""Convert assignment-AST to variable names.
    This raises `TypeError` if the assignment does not create a new variable::
ary[0] = 'foo'
dic["bar"] = 'baz'
# => TypeError
"""
if self:
self_id = self.arg
node_name = node.__class__.__name__
if node_name in ('Index', 'Num', 'Slice', 'Str', 'Subscript'):
raise TypeError('%r does not create new variable' % node)
elif node_name == 'Name':
if self is None or node.id == self_id: # type: ignore
return [node.id] # type: ignore
else:
raise TypeError('The assignment %r is not instance variable' % node)
elif node_name in ('Tuple', 'List'):
members = []
for elt in node.elts: # type: ignore
try:
members.extend(get_lvar_names(elt, self))
except TypeError:
pass
return members
elif node_name == 'Attribute':
if node.value.__class__.__name__ == 'Name' and self and node.value.id == self_id: # type: ignore # NOQA
# instance variable
return ["%s" % get_lvar_names(node.attr, self)[0]] # type: ignore
else:
raise TypeError('The assignment %r is not instance variable' % node)
elif node_name == 'str':
return [node] # type: ignore
elif node_name == 'Starred':
return get_lvar_names(node.value, self) # type: ignore
else:
raise NotImplementedError('Unexpected node name %r' % node_name)
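# Minimal sketch (not part of the original module): feeding a parsed assignment
# target into get_lvar_names; nested tuple targets are flattened in order.
def _example_lvar_names() -> List[str]:
    tree = parse("x, (y, z) = 1, (2, 3)")
    return get_lvar_names(tree.body[0].targets[0])  # -> ['x', 'y', 'z']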
def dedent_docstring(s: str) -> str:
"""Remove common leading indentation from docstring."""
def dummy() -> None:
# dummy function to mock `inspect.getdoc`.
pass
dummy.__doc__ = s
docstring = inspect.getdoc(dummy)
return docstring.lstrip("\r\n").rstrip("\r\n")
class Token:
"""Better token wrapper for tokenize module."""
def __init__(self, kind: int, value: Any, start: Tuple[int, int], end: Tuple[int, int],
source: str) -> None:
self.kind = kind
self.value = value
self.start = start
self.end = end
self.source = source
def __eq__(self, other: Any) -> bool:
if isinstance(other, int):
return self.kind == other
elif isinstance(other, str):
return self.value == other
elif isinstance(other, (list, tuple)):
return [self.kind, self.value] == list(other)
elif other is None:
return False
else:
raise ValueError('Unknown value: %r' % other)
def match(self, *conditions: Any) -> bool:
return any(self == candidate for candidate in conditions)
def __repr__(self) -> str:
return '<Token kind=%r value=%r>' % (tokenize.tok_name[self.kind],
self.value.strip())
class TokenProcessor:
def __init__(self, buffers: List[str]) -> None:
lines = iter(buffers)
self.buffers = buffers
self.tokens = tokenize.generate_tokens(lambda: next(lines))
self.current = None # type: Token
self.previous = None # type: Token
def get_line(self, lineno: int) -> str:
"""Returns specified line."""
return self.buffers[lineno - 1]
def fetch_token(self) -> Token:
"""Fetch a next token from source code.
Returns ``None`` if sequence finished.
"""
try:
self.previous = self.current
self.current = Token(*next(self.tokens))
except StopIteration:
self.current = None
return self.current
def fetch_until(self, condition: Any) -> List[Token]:
"""Fetch tokens until specified token appeared.
.. note:: This also handles parenthesis well.
"""
tokens = []
while self.fetch_token():
tokens.append(self.current)
if self.current == condition:
break
elif self.current == [OP, '(']:
tokens += self.fetch_until([OP, ')'])
elif self.current == [OP, '{']:
tokens += self.fetch_until([OP, '}'])
elif self.current == [OP, '[']:
tokens += self.fetch_until([OP, ']'])
return tokens
class AfterCommentParser(TokenProcessor):
"""Python source code parser to pick up comment after assignment.
    This parser takes Python code that starts with an assignment statement,
    and returns the comment for the variable if one exists.
"""
def __init__(self, lines: List[str]) -> None:
super().__init__(lines)
self.comment = None # type: str
def fetch_rvalue(self) -> List[Token]:
"""Fetch right-hand value of assignment."""
tokens = []
while self.fetch_token():
tokens.append(self.current)
if self.current == [OP, '(']:
tokens += self.fetch_until([OP, ')'])
elif self.current == [OP, '{']:
tokens += self.fetch_until([OP, '}'])
elif self.current == [OP, '[']:
tokens += self.fetch_until([OP, ']'])
elif self.current == INDENT:
tokens += self.fetch_until(DEDENT)
elif self.current == [OP, ';']:
break
elif self.current.kind not in (OP, NAME, NUMBER, STRING):
break
return tokens
def parse(self) -> None:
"""Parse the code and obtain comment after assignment."""
# skip lvalue (or whole of AnnAssign)
while not self.fetch_token().match([OP, '='], NEWLINE, COMMENT):
assert self.current
# skip rvalue (if exists)
if self.current == [OP, '=']:
self.fetch_rvalue()
if self.current == COMMENT:
self.comment = self.current.value
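# Minimal sketch (not part of the original module): AfterCommentParser picking
# up a "#:"-style comment that trails an assignment on the same source line.
def _example_after_comment() -> str:
    parser = AfterCommentParser(["x = 1  #: the answer\n"])
    parser.parse()
    return parser.comment  # -> '#: the answer'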
class VariableCommentPicker(ast.NodeVisitor):
"""Python source code parser to pick up variable comments."""
def __init__(self, buffers: List[str], encoding: str) -> None:
self.counter = itertools.count()
self.buffers = buffers
self.encoding = encoding
self.context = [] # type: List[str]
self.current_classes = [] # type: List[str]
self.current_function = None # type: ast.FunctionDef
self.comments = {} # type: Dict[Tuple[str, str], str]
self.annotations = {} # type: Dict[Tuple[str, str], str]
self.previous = None # type: ast.AST
self.deforders = {} # type: Dict[str, int]
self.finals = [] # type: List[str]
self.typing = None # type: str
self.typing_final = None # type: str
super().__init__()
def get_qualname_for(self, name: str) -> Optional[List[str]]:
"""Get qualified name for given object as a list of string."""
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
return self.context[:-1] + [name]
else:
return None
else:
return self.context + [name]
def add_entry(self, name: str) -> None:
qualname = self.get_qualname_for(name)
if qualname:
self.deforders[".".join(qualname)] = next(self.counter)
def add_final_entry(self, name: str) -> None:
qualname = self.get_qualname_for(name)
if qualname:
self.finals.append(".".join(qualname))
def add_variable_comment(self, name: str, comment: str) -> None:
qualname = self.get_qualname_for(name)
if qualname:
basename = ".".join(qualname[:-1])
self.comments[(basename, name)] = comment
def add_variable_annotation(self, name: str, annotation: ast.AST) -> None:
qualname = self.get_qualname_for(name)
if qualname:
basename = ".".join(qualname[:-1])
self.annotations[(basename, name)] = unparse(annotation)
def is_final(self, decorators: List[ast.expr]) -> bool:
final = []
if self.typing:
final.append('%s.final' % self.typing)
if self.typing_final:
final.append(self.typing_final)
for decorator in decorators:
try:
if unparse(decorator) in final:
return True
except NotImplementedError:
pass
return False
def get_self(self) -> ast.arg:
"""Returns the name of first argument if in function."""
if self.current_function and self.current_function.args.args:
return self.current_function.args.args[0]
else:
return None
def get_line(self, lineno: int) -> str:
"""Returns specified line."""
return self.buffers[lineno - 1]
def visit(self, node: ast.AST) -> None:
"""Updates self.previous to ."""
super().visit(node)
self.previous = node
def visit_Import(self, node: ast.Import) -> None:
"""Handles Import node and record it to definition orders."""
for name in node.names:
self.add_entry(name.asname or name.name)
if name.name == 'typing':
self.typing = name.asname or name.name
elif name.name == 'typing.final':
self.typing_final = name.asname or name.name
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
"""Handles Import node and record it to definition orders."""
for name in node.names:
self.add_entry(name.asname or name.name)
if node.module == 'typing' and name.name == 'final':
self.typing_final = name.asname or name.name
def visit_Assign(self, node: ast.Assign) -> None:
"""Handles Assign node and pick up a variable comment."""
try:
targets = get_assign_targets(node)
varnames = sum([get_lvar_names(t, self=self.get_self()) for t in targets], []) # type: List[str] # NOQA
current_line = self.get_line(node.lineno)
except TypeError:
return # this assignment is not new definition!
# record annotation
if hasattr(node, 'annotation') and node.annotation: # type: ignore
for varname in varnames:
self.add_variable_annotation(varname, node.annotation) # type: ignore
elif hasattr(node, 'type_comment') and node.type_comment:
for varname in varnames:
self.add_variable_annotation(varname, node.type_comment) # type: ignore
# check comments after assignment
parser = AfterCommentParser([current_line[node.col_offset:]] +
self.buffers[node.lineno:])
parser.parse()
if parser.comment and comment_re.match(parser.comment):
for varname in varnames:
self.add_variable_comment(varname, comment_re.sub('\\1', parser.comment))
self.add_entry(varname)
return
# check comments before assignment
if indent_re.match(current_line[:node.col_offset]):
comment_lines = []
for i in range(node.lineno - 1):
before_line = self.get_line(node.lineno - 1 - i)
if comment_re.match(before_line):
comment_lines.append(comment_re.sub('\\1', before_line))
else:
break
if comment_lines:
comment = dedent_docstring('\n'.join(reversed(comment_lines)))
for varname in varnames:
self.add_variable_comment(varname, comment)
self.add_entry(varname)
return
# not commented (record deforders only)
for varname in varnames:
self.add_entry(varname)
def visit_AnnAssign(self, node: ast.AST) -> None: # Note: ast.AnnAssign not found in py35
"""Handles AnnAssign node and pick up a variable comment."""
self.visit_Assign(node) # type: ignore
def visit_Expr(self, node: ast.Expr) -> None:
"""Handles Expr node and pick up a comment if string."""
if (isinstance(self.previous, ASSIGN_NODES) and isinstance(node.value, ast.Str)):
try:
targets = get_assign_targets(self.previous)
varnames = get_lvar_names(targets[0], self.get_self())
for varname in varnames:
if isinstance(node.value.s, str):
docstring = node.value.s
else:
docstring = node.value.s.decode(self.encoding or 'utf-8')
self.add_variable_comment(varname, dedent_docstring(docstring))
self.add_entry(varname)
except TypeError:
pass # this assignment is not new definition!
def visit_Try(self, node: ast.Try) -> None:
"""Handles Try node and processes body and else-clause.
.. note:: pycode parser ignores objects definition in except-clause.
"""
for subnode in node.body:
self.visit(subnode)
for subnode in node.orelse:
self.visit(subnode)
def visit_ClassDef(self, node: ast.ClassDef) -> None:
"""Handles ClassDef node and set context."""
self.current_classes.append(node.name)
self.add_entry(node.name)
if self.is_final(node.decorator_list):
self.add_final_entry(node.name)
self.context.append(node.name)
self.previous = node
for child in node.body:
self.visit(child)
self.context.pop()
self.current_classes.pop()
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
"""Handles FunctionDef node and set context."""
if self.current_function is None:
self.add_entry(node.name) # should be called before setting self.current_function
if self.is_final(node.decorator_list):
self.add_final_entry(node.name)
self.context.append(node.name)
self.current_function = node
for child in node.body:
self.visit(child)
self.context.pop()
self.current_function = None
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
"""Handles AsyncFunctionDef node and set context."""
self.visit_FunctionDef(node) # type: ignore
class DefinitionFinder(TokenProcessor):
"""Python source code parser to detect location of functions,
classes and methods.
"""
def __init__(self, lines: List[str]) -> None:
super().__init__(lines)
self.decorator = None # type: Token
self.context = [] # type: List[str]
self.indents = [] # type: List
self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def add_definition(self, name: str, entry: Tuple[str, int, int]) -> None:
"""Add a location of definition."""
if self.indents and self.indents[-1][0] == 'def' and entry[0] == 'def':
# ignore definition of inner function
pass
else:
self.definitions[name] = entry
def parse(self) -> None:
"""Parse the code to obtain location of definitions."""
while True:
token = self.fetch_token()
if token is None:
break
elif token == COMMENT:
pass
elif token == [OP, '@'] and (self.previous is None or
self.previous.match(NEWLINE, NL, INDENT, DEDENT)):
if self.decorator is None:
self.decorator = token
elif token.match([NAME, 'class']):
self.parse_definition('class')
elif token.match([NAME, 'def']):
self.parse_definition('def')
elif token == INDENT:
self.indents.append(('other', None, None))
elif token == DEDENT:
self.finalize_block()
def parse_definition(self, typ: str) -> None:
"""Parse AST of definition."""
name = self.fetch_token()
self.context.append(name.value)
funcname = '.'.join(self.context)
if self.decorator:
start_pos = self.decorator.start[0]
self.decorator = None
else:
start_pos = name.start[0]
self.fetch_until([OP, ':'])
if self.fetch_token().match(COMMENT, NEWLINE):
self.fetch_until(INDENT)
self.indents.append((typ, funcname, start_pos))
else:
# one-liner
self.add_definition(funcname, (typ, start_pos, name.end[0]))
self.context.pop()
def finalize_block(self) -> None:
"""Finalize definition block."""
definition = self.indents.pop()
if definition[0] != 'other':
typ, funcname, start_pos = definition
end_pos = self.current.end[0] - 1
while emptyline_re.match(self.get_line(end_pos)):
end_pos -= 1
self.add_definition(funcname, (typ, start_pos, end_pos))
self.context.pop()
class Parser:
"""Python source code parser to pick up variable comments.
This is a better wrapper for ``VariableCommentPicker``.
"""
def __init__(self, code: str, encoding: str = 'utf-8') -> None:
self.code = filter_whitespace(code)
self.encoding = encoding
self.annotations = {} # type: Dict[Tuple[str, str], str]
self.comments = {} # type: Dict[Tuple[str, str], str]
self.deforders = {} # type: Dict[str, int]
self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
self.finals = [] # type: List[str]
def parse(self) -> None:
"""Parse the source code."""
self.parse_comments()
self.parse_definition()
def parse_comments(self) -> None:
"""Parse the code and pick up comments."""
tree = parse(self.code)
picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)
picker.visit(tree)
self.annotations = picker.annotations
self.comments = picker.comments
self.deforders = picker.deforders
self.finals = picker.finals
def parse_definition(self) -> None:
"""Parse the location of definitions from the code."""
parser = DefinitionFinder(self.code.splitlines(True))
parser.parse()
self.definitions = parser.definitions
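# --- Editor's illustrative sketch (not part of the original module) ----------
# A minimal, hypothetical driver for the Parser defined above: feed it a small
# source string and inspect the variable comments it picks up. The output shown
# in the comments is indicative only.
if __name__ == '__main__':
    sample_source = (
        "#: answer to everything\n"
        "ANSWER = 42\n"
    )
    _parser = Parser(sample_source)
    _parser.parse()
    print(_parser.comments)     # e.g. {('', 'ANSWER'): 'answer to everything'}
    print(_parser.definitions)  # class/def locations; empty for this sample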
| 36.789668
| 117
| 0.574774
|
5e867f41830adccdb61087457111ad833032e5a7
| 1,109
|
py
|
Python
|
multitest_transport/plugins/__init__.py
|
maksonlee/multitest_transport
|
9c20a48ac856307950a204854f52be7335705054
|
[
"Apache-2.0"
] | null | null | null |
multitest_transport/plugins/__init__.py
|
maksonlee/multitest_transport
|
9c20a48ac856307950a204854f52be7335705054
|
[
"Apache-2.0"
] | null | null | null |
multitest_transport/plugins/__init__.py
|
maksonlee/multitest_transport
|
9c20a48ac856307950a204854f52be7335705054
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A plugin package."""
import importlib
import logging
import os
import pkgutil
from multitest_transport.plugins.base import *
def Discover():
"""Import all plugin modules."""
logging.info('Discovering plugin modules...')
for _, name, ispkg in pkgutil.iter_modules([os.path.dirname(__file__)]):
if ispkg or name in ['base', 'constant', 'registry']:
continue
if __package__:
name = __package__ + '.' + name
logging.info('Importing plugin %s...', name)
importlib.import_module(name)
Discover()
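# Editor's note (illustrative, not part of the original file): because Discover()
# runs at import time, a hypothetical module such as
# multitest_transport/plugins/my_provider.py (anything other than base, constant
# and registry) is imported automatically; whatever registration it needs is done
# inside the plugin module itself via the APIs re-exported from .base (not shown).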
| 30.805556
| 74
| 0.724977
|
e241536411fb2b06bfeaa8c885717fc6c67b7c92
| 163
|
py
|
Python
|
tapioca_qualys_was/__init__.py
|
siteblindado/tapioca-qualys-was
|
c6a76469065f6fead4b0c8444885589eb5d21730
|
[
"MIT"
] | null | null | null |
tapioca_qualys_was/__init__.py
|
siteblindado/tapioca-qualys-was
|
c6a76469065f6fead4b0c8444885589eb5d21730
|
[
"MIT"
] | 4
|
2020-03-24T17:14:26.000Z
|
2021-06-02T00:10:18.000Z
|
tapioca_qualys_was/__init__.py
|
siteblindado/tapioca-qualys-was
|
c6a76469065f6fead4b0c8444885589eb5d21730
|
[
"MIT"
] | null | null | null |
# coding: utf-8
__author__ = 'Site Blindado S.A.'
__email__ = 'dev@siteblindado.com.br'
__version__ = '0.10'
from .tapioca_qualys_was import Qualys_was # noqa
| 18.111111
| 50
| 0.730061
|
ec3a459238bb18aa6f7195722fa3c1df374e336d
| 6,139
|
py
|
Python
|
venv/lib/python3.5/site-packages/airflow/contrib/operators/gcs_to_bq.py
|
mesodiar/bello-airflow
|
afede57f214774b50e6a4c083ca096ca2c060d31
|
[
"MIT"
] | 1
|
2021-04-05T11:25:36.000Z
|
2021-04-05T11:25:36.000Z
|
airflow/contrib/operators/gcs_to_bq.py
|
fvlankvelt/airflow
|
6cbe4a475f773bf32e1d7743718f7ae1a7dd9c91
|
[
"Apache-2.0"
] | null | null | null |
airflow/contrib/operators/gcs_to_bq.py
|
fvlankvelt/airflow
|
6cbe4a475f773bf32e1d7743718f7ae1a7dd9c91
|
[
"Apache-2.0"
] | 1
|
2019-12-12T06:44:14.000Z
|
2019-12-12T06:44:14.000Z
|
import json
import logging
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageToBigQueryOperator(BaseOperator):
"""
Loads files from Google cloud storage into BigQuery.
"""
    template_fields = ('bucket', 'source_objects', 'schema_object',
                       'destination_project_dataset_table')
template_ext = ('.sql',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket,
source_objects,
destination_project_dataset_table,
schema_fields=False,
schema_object=False,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_id_key=False,
bigquery_conn_id='bigquery_default',
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
"""
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google cloud storage object name. The object in
Google cloud storage must be a JSON file with the schema fields in it.
:param bucket: The bucket to load from.
:type bucket: string
:param source_objects: List of Google cloud storage URIs to load from.
        :type source_objects: list
:param destination_project_dataset_table: The dotted (<project>.)<dataset>.<table> BigQuery table to load data
into. If <project> is not included, project will be the project defined in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param schema_object: If set, a GCS object path pointing to a .json file that contains the schema for the table.
        :type schema_object: string
        :param source_format: File format of the data to load.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param max_id_key: If set, the name of a column in the BigQuery table
            that's to be loaded. This will be used to select the MAX value from
BigQuery after the load occurs. The results will be returned by the
execute() command, which in turn gets stored in XCom for future
operators to use. This can be helpful with incremental loads--during
future executions, you can pick up from the max ID.
:type max_id_key: string
:param bigquery_conn_id: Reference to a specific BigQuery hook.
:type bigquery_conn_id: string
:param google_cloud_storage_conn_id: Reference to a specific Google
cloud storage hook.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
"""
super(GoogleCloudStorageToBigQueryOperator, self).__init__(*args, **kwargs)
# GCS config
self.bucket = bucket
self.source_objects = source_objects
self.schema_object = schema_object
# BQ config
self.destination_project_dataset_table = destination_project_dataset_table
self.schema_fields = schema_fields
self.source_format = source_format
self.create_disposition = create_disposition
self.skip_leading_rows = skip_leading_rows
self.write_disposition = write_disposition
self.field_delimiter = field_delimiter
self.max_id_key = max_id_key
self.bigquery_conn_id = bigquery_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
gcs_hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to)
schema_fields = self.schema_fields if self.schema_fields else json.loads(gcs_hook.download(self.bucket, self.schema_object))
        source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
                       for source_object in self.source_objects]
conn = bq_hook.get_conn()
cursor = conn.cursor()
cursor.run_load(
destination_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
create_disposition=self.create_disposition,
skip_leading_rows=self.skip_leading_rows,
write_disposition=self.write_disposition,
field_delimiter=self.field_delimiter)
if self.max_id_key:
cursor.execute('SELECT MAX({}) FROM {}'.format(self.max_id_key, self.destination_project_dataset_table))
row = cursor.fetchone()
max_id = row[0] if row[0] else 0
logging.info('Loaded BQ data with max {}.{}={}'.format(self.destination_project_dataset_table, self.max_id_key, max_id))
return max_id
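# --- Editor's illustrative sketch (not part of the original operator) --------
# A minimal, hypothetical task definition using this operator inside a DAG.
# Bucket, object and table names are made up; connection ids fall back to the
# defaults declared in __init__.
#
# load_csv = GoogleCloudStorageToBigQueryOperator(
#     task_id='gcs_to_bq_example',
#     bucket='example-bucket',
#     source_objects=['data/part-001.csv'],
#     destination_project_dataset_table='example_dataset.example_table',
#     schema_object='schemas/example_table.json',
#     skip_leading_rows=1,
#     write_disposition='WRITE_TRUNCATE',
#     dag=dag,
# )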
| 47.960938
| 132
| 0.6892
|
bb7e0b7352545272ee9fee7da596c09b0c14a9f1
| 183
|
py
|
Python
|
Archive/LookingForEigenvalues.py
|
fikrinotes/PowerfulPython
|
85b4f09ef9faeecaa42e5214accda4ff07021cd7
|
[
"MIT"
] | 1
|
2021-09-18T09:30:47.000Z
|
2021-09-18T09:30:47.000Z
|
Archive/LookingForEigenvalues.py
|
fikrinotes/PowerfulPython
|
85b4f09ef9faeecaa42e5214accda4ff07021cd7
|
[
"MIT"
] | null | null | null |
Archive/LookingForEigenvalues.py
|
fikrinotes/PowerfulPython
|
85b4f09ef9faeecaa42e5214accda4ff07021cd7
|
[
"MIT"
] | null | null | null |
import numpy as np
A = np.array([[1, 3], [-1, 5]])
c = np.linalg.det(A)
d = np.linalg.inv(A)
b = np.linalg.eigvals(A)
print('Eigenvalues=', b)
print('determinant=', c)
print('inverseA:')
print(d)
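# Editor's note (illustrative): np.linalg.eig(A) would additionally return the
# eigenvectors, e.g. w, v = np.linalg.eig(A), where the columns of v are the
# eigenvectors corresponding to the eigenvalues in w.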
| 18.3
| 26
| 0.672131
|
a3e999755a6e9001f39e06f6a03c124152f969a7
| 285
|
py
|
Python
|
code/tmp_rtrip/test/encoded_modules/__init__.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24
|
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/test/encoded_modules/__init__.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17
|
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/test/encoded_modules/__init__.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
test_strings = ('iso_8859_1', 'iso-8859-1',
"Les hommes ont oublié cette vérité, dit le renard. Mais tu ne dois pas l'oublier. Tu deviens responsable pour toujours de ce que tu as apprivoisé."
), ('koi8_r', 'koi8-r',
'Познание бесконечности требует бесконечного времени.')
| 57
| 152
| 0.715789
|
df8e270547a929ed57a2b10d5a6de26816575a37
| 1,474
|
py
|
Python
|
sample/qiskit_cuccaro.py
|
gines-carrascal/jupyter-qiskit
|
cb831e0defb23e670a3c56cee36eb2138af5998f
|
[
"Apache-2.0"
] | 1
|
2020-04-19T17:21:24.000Z
|
2020-04-19T17:21:24.000Z
|
sample/qiskit_cuccaro.py
|
gines-carrascal/jupyter-qiskit
|
cb831e0defb23e670a3c56cee36eb2138af5998f
|
[
"Apache-2.0"
] | null | null | null |
sample/qiskit_cuccaro.py
|
gines-carrascal/jupyter-qiskit
|
cb831e0defb23e670a3c56cee36eb2138af5998f
|
[
"Apache-2.0"
] | null | null | null |
from qiskit import ClassicalRegister, QuantumRegister
from qiskit import QuantumCircuit
from qiskit import execute
from qiskit import Aer
sumando_1 = input("Primer sumando en binario (4 bits)")
sumando_2 = input("Segundo sumando en binario(4 bits)")
n = 4
a = QuantumRegister(n,"a")
b = QuantumRegister(n+1, "b")
c = QuantumRegister(1, "c")
resultado = ClassicalRegister(n+1, "result")
qc = QuantumCircuit(a,b,c,resultado)
for i in range(n):
if sumando_1[i] == "1":
qc.x(a[n - (i+1)])
for i in range(n):
if sumando_2[i] == "1":
qc.x(b[n - (i+1)])
for i in range(1, n):
qc.cx(a[i], b[i])
qc.cx(a[1], c[0])
qc.ccx(a[0], b[0], c[0])
qc.cx(a[2], a[1])
qc.ccx(c[0], b[1], a[1])
qc.cx(a[3], a[2])
for i in range(2, n-2):
qc.ccx(a[i-1], b[i], a[i])
qc.cx(a[i+2], a[i+1])
qc.ccx(a[n-3], b[n-2], a[n-2])
qc.cx(a[n-1], b[n])
qc.ccx(a[n-2], b[n-1], b[n])
for i in range(1, n-1):
qc.x(b[i])
qc.cx(c[0], b[1])
for i in range(2, n):
qc.cx(a[i-1], b[i])
qc.ccx(a[n-3], b[n-2], a[n-2])
for i in range(n-3,1,-1):
qc.ccx(a[i-1], b[i], a[i])
qc.cx(a[i+2], a[i+1])
qc.x(b[i+1])
qc.ccx(c[0], b[1], a[1])
qc.cx(a[3], a[2])
qc.x(b[2])
qc.ccx(a[0], b[0], c[0])
qc.cx(a[2], a[1])
qc.x(b[1])
qc.cx(a[1], c[0])
for i in range(n):
qc.cx(a[i], b[i])
qc.measure(b,resultado)
my_backend = Aer.get_backend("qasm_simulator")
job = execute(qc, my_backend, shots=20)
job_stats = job.result().get_counts()
print(job_stats)
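# Editor's note (illustrative, unverified output): job_stats maps 5-bit result
# strings to shot counts. For inputs 0011 and 0101 the Cuccaro adder should
# yield the sum 01000 (8) in every shot, i.e. roughly {'01000': 20}; the exact
# key depends on how Qiskit orders the classical register bits.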
| 19.394737
| 55
| 0.566486
|
d6cad9210b593b3a7884fc6f1dec0230b3e5962d
| 315
|
py
|
Python
|
pytest_django_test/settings_mysql_innodb.py
|
garyd203/pytest-django
|
5c91295c5818f275c5e8e92e6e24391a85c6b854
|
[
"BSD-3-Clause"
] | 1
|
2020-02-20T12:31:02.000Z
|
2020-02-20T12:31:02.000Z
|
pytest_django_test/settings_mysql_innodb.py
|
garyd203/pytest-django
|
5c91295c5818f275c5e8e92e6e24391a85c6b854
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_django_test/settings_mysql_innodb.py
|
garyd203/pytest-django
|
5c91295c5818f275c5e8e92e6e24391a85c6b854
|
[
"BSD-3-Clause"
] | null | null | null |
from .settings_base import * # noqa: F403
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": "pytest_django" + db_suffix, # noqa: F405
"HOST": "localhost",
"USER": "root",
"OPTIONS": {"init_command": "SET default_storage_engine=InnoDB"},
}
}
| 26.25
| 73
| 0.568254
|
c6d54e848ec728627117e7346264dcbe45ffb951
| 6,516
|
py
|
Python
|
chempy/kinetics/_native.py
|
bertiewooster/chempy
|
115adc1d570aa1631baff4374f3128ce23fa7776
|
[
"BSD-2-Clause"
] | 340
|
2015-10-30T03:41:05.000Z
|
2022-03-31T05:01:17.000Z
|
chempy/kinetics/_native.py
|
bertiewooster/chempy
|
115adc1d570aa1631baff4374f3128ce23fa7776
|
[
"BSD-2-Clause"
] | 80
|
2015-11-03T13:31:23.000Z
|
2022-03-31T16:46:19.000Z
|
chempy/kinetics/_native.py
|
bertiewooster/chempy
|
115adc1d570aa1631baff4374f3128ce23fa7776
|
[
"BSD-2-Clause"
] | 75
|
2016-06-06T19:55:48.000Z
|
2022-03-19T23:39:13.000Z
|
# -*- coding: utf-8 -*-
"""
Non-public API (expect changes without notice).
Helper functions for using native code generation together with pyodesys.
"""
from collections import OrderedDict
try:
from pyodesys.native import native_sys
except ImportError:
native_sys = None
PartiallySolvedSystem = None
render_mako = None
else:
from pyodesys.symbolic import PartiallySolvedSystem
from pyodesys.native.util import render_mako
from .. import Substance
_anon = """
template <typename T>
constexpr T vecmin(T&& a){
return std::forward<T>(a);
}
template <typename T1, typename T2>
constexpr typename std::common_type<T1, T2>::type vecmin(T1&& a, T2&& b){
return (a < b) ? std::forward<T1>(a) : std::forward<T2>(b);
}
template <typename T, typename... Ts>
constexpr typename std::common_type<T, Ts...>::type vecmin(T&& a, Ts&&... args){
return vecmin(std::forward<T>(a), vecmin(std::forward<Ts>(args)...));
}
std::vector<double> upper_conc_bounds(const double * const y){
auto bounds = std::vector<double>(${odesys.ny});
double cc[${ncomp}];
% for ci in range(ncomp):
cc[${ci}] = ${' + '.join([('%d*y[%d]' % (v, k)) if v != 1 else 'y[%d]' % k for k, v in comp_conc[ci].items()])};
% endfor
% for si, subst_key in enumerate(getattr(odesys, 'free_names', odesys.names)):
% if len(subst_comp[si]) > 0:
bounds[${si}] = vecmin(${', '.join(['INFINITY' if n == 0 else ('cc[%d]/%d' % (ci, n)) if n != 1 else 'cc[%d]' % ci for ci, n in subst_comp[si].items()])});
% else:
bounds[${si}] = INFINITY;
% endif
% endfor
return bounds;
}
""" # noqa
_first_step = """
m_upper_bounds = upper_conc_bounds(${init_conc});
m_lower_bounds.resize(${odesys.ny});
return m_rtol*std::min(get_dx_max(x, y), 1.0);
"""
_roots_ss = """
const int ny = get_ny();
std::vector<double> f(ny);
double tot=0.0;
rhs(x, y, &f[0]);
for (int i=0; i<ny; ++i){
tot += std::min(std::abs(f[i]/m_atol[i]), std::abs(f[i]/y[i]/m_rtol)); // m_atol needs to be of size ny!
}
out[0] = tot/ny - m_special_settings[0];
this->nrev++;
return AnyODE::Status::success;
"""
_constr_special_settings = r"""
if (m_special_settings.size() == 0){
std::cerr << __FILE__ << ":" << __LINE__ << ": no special_settings passed, using default [%(factor)s]\n";
m_special_settings = {%(factor)s};
} else {
// std::cerr << __FILE__ << ":" << __LINE__ << ": using special_settings:" << m_special_settings[0] << "\n";
}
""" % {
"factor": "1e2"
}
def _get_comp_conc(rsys, odesys, comp_keys, skip_keys):
comp_conc = []
for comp_key in comp_keys:
if comp_key in skip_keys:
continue # see Substance.__doc__
_d = OrderedDict()
for si, subst_key in enumerate(odesys.names):
coeff = rsys.substances[subst_key].composition.get(comp_key, 0)
if coeff != 0:
_d[si] = coeff
comp_conc.append(_d)
return comp_conc
def _get_subst_comp(rsys, odesys, comp_keys, skip_keys):
subst_comp = []
for subst_key in odesys.names:
_d = OrderedDict()
for k, v in rsys.substances[subst_key].composition.items():
if k in skip_keys:
continue
_d[comp_keys.index(k)] = v
subst_comp.append(_d)
return subst_comp
def get_native(
rsys, odesys, integrator, skip_keys=(0,), steady_state_root=False, conc_roots=None
):
comp_keys = Substance.composition_keys(
rsys.substances.values(), skip_keys=skip_keys
)
if PartiallySolvedSystem is None:
raise ValueError("Failed to import 'native_sys' from 'pyodesys.native'")
elif isinstance(odesys, PartiallySolvedSystem):
init_conc = "&m_p[%d]" % (len(odesys.params) - len(odesys.original_dep))
else:
init_conc = "y"
kw = dict(
namespace_override={
"p_get_dx_max": True,
}
)
if all(subst.composition is None for subst in rsys.substances.values()):
pass
else:
kw["namespace_override"]["p_anon"] = render_mako(
_anon,
odesys=odesys,
ncomp=len(comp_keys),
comp_conc=_get_comp_conc(rsys, odesys, comp_keys, skip_keys),
subst_comp=_get_subst_comp(rsys, odesys, comp_keys, skip_keys),
)
kw["namespace_override"]["p_first_step"] = render_mako(
_first_step, init_conc=init_conc, odesys=odesys
)
ns_extend = kw.get("namespace_extend", {})
if steady_state_root or conc_roots:
if not native_sys[integrator]._NativeCode._support_roots:
raise ValueError("integrator '%s' does not support roots." % integrator)
if odesys.roots is not None:
raise ValueError("roots already set")
if steady_state_root:
assert conc_roots is None
kw["namespace_override"]["p_nroots"] = " return 1; "
kw["namespace_override"]["p_roots"] = _roots_ss
if "p_constructor" not in ns_extend:
ns_extend["p_constructor"] = []
ns_extend["p_constructor"] += [_constr_special_settings]
elif conc_roots:
# This could (with some effort) be rewritten to take limits as parameters and have a
# preprocessor in odesys.pre_processors do the dedimensionalization.
assert not steady_state_root
assert all(k in odesys.names and k in rsys.substances for k in conc_roots)
kw["namespace_override"]["p_nroots"] = " return %d; " % len(conc_roots)
kw["namespace_override"]["p_roots"] = (
"".join(
[
" out[%(i)d] = y[%(j)d] - m_special_settings[%(i)d];\n"
% dict(i=i, j=odesys.names.index(k))
for i, k in enumerate(conc_roots)
]
)
+ " return AnyODE::Status::success;\n"
)
if "p_constructor" not in ns_extend:
ns_extend["p_constructor"] = []
ns_extend["p_constructor"] += [
'if (m_special_settings.size() != %d) throw std::runtime_error("special_settings missing");'
% len(conc_roots)
]
if "p_includes" not in ns_extend:
ns_extend["p_includes"] = set()
ns_extend["p_includes"] |= {"<type_traits>", "<vector>"}
return native_sys[integrator].from_other(odesys, namespace_extend=ns_extend, **kw)
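# --- Editor's illustrative sketch (hypothetical objects, not in the module) ---
# Typical call site, assuming ``rsys`` is a ReactionSystem and ``odesys`` was
# derived from it before being handed to get_native:
#
# native = get_native(rsys, odesys, 'cvode', steady_state_root=True)
# result = native.integrate(tend, c0, params, atol=1e-8, rtol=1e-8,
#                           special_settings=[1e2])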
| 35.606557
| 163
| 0.596378
|
12ec335ac4c5f2101234592883cbfdbe388ecee8
| 12,193
|
py
|
Python
|
app/Controllers/UsersController.py
|
huaSoftware/easy-flask-json-mvc-socketio
|
d6aec4b3e610b4cc04c1650801a061c8fb92030e
|
[
"Apache-2.0"
] | 1
|
2019-12-11T01:26:00.000Z
|
2019-12-11T01:26:00.000Z
|
app/Controllers/UsersController.py
|
huaSoftware/easy-flask-json-mvc-socketio
|
d6aec4b3e610b4cc04c1650801a061c8fb92030e
|
[
"Apache-2.0"
] | null | null | null |
app/Controllers/UsersController.py
|
huaSoftware/easy-flask-json-mvc-socketio
|
d6aec4b3e610b4cc04c1650801a061c8fb92030e
|
[
"Apache-2.0"
] | null | null | null |
'''
@Author: hua
@Date: 2018-08-30 10:52:23
@LastEditors: hua
@LastEditTime: 2019-12-03 14:13:31
'''
from app import app
from app.Controllers.BaseController import BaseController
from app.Vendor.Utils import Utils
from app.Models.Users import Users
from app.Models.Suggest import Suggest
from app.Models.Comments import Comments
from app.Models.ImgShard import ImgShard
from app.Models.Log import Log
from app.Vendor.UsersAuthJWT import UsersAuthJWT
from app.Vendor.Decorator import validator
from flask import request
from werkzeug.utils import secure_filename
import os, base64
@app.route('/', methods=['GET'])
def index():
""" 测试 """
Log().add({
"type":1,
"level":1,
"data":"1"
})
return BaseController().successData(msg='启动成功')
@app.route('/api/v2/register', methods=['POST'])
@validator(name="email", rules={'required': True, 'type': 'string', 'minlength': 10, 'maxlength': 20})
@validator(name="password", rules={'required': True, 'type': 'string', 'minlength': 6, 'maxlength': 20})
def register(params):
    ''' Register a new user. '''
filters = {
Users.email == params['email']
}
userData = Users().getOne(filters)
if(userData == None):
user = Users(
email=params['email'],
password=Users.set_password(params['password']),
status=1)
status = user.add(user)
if status == True:
return BaseController().successData(msg='注册成功')
return BaseController().error('注册失败')
return BaseController().error('账号已注册')
@app.route('/api/v2/login', methods=['POST'])
def login():
    ''' Log in. '''
email = request.json.get('email')
password = request.json.get('password')
if (not email or not password):
return BaseController().error('用户名和密码不能为空')
else:
result = UsersAuthJWT.authenticate(email, password)
return result
@app.route('/api/v2/user', methods=['GET'])
def get():
'''
    * Get user information.
    * The error handler in the JWT layer (_default_jwt_error_handler) is
    * overridden to unify the response headers.
'''
result = UsersAuthJWT().identify(request)
if isinstance(result, str):
return BaseController().error(result)
if (result['data']):
user = Users.get(result['data']['id'])
returnUser = {
'id': user.id,
'name': user.name,
'email': user.email,
'login_time': user.updated_at
}
return BaseController().successData(returnUser)
return BaseController().error('未找到用户')
@app.route('/api/v2/userInfo', methods=['POST'])
def getInfo():
""" 不通过鉴权获取用户信息 """
id = request.json.get('id')
data = Users.query.filter_by(id=id).all()
datas = Utils.db_l_to_d(data)
return BaseController().successData(datas)
@app.route('/api/v2/user/suggest', methods=['GET'])
def userSuggest():
    ''' Query the user's suggestion records (one-to-many relationship).
'''
data_msg = Suggest.on_to_many()
return BaseController().successData(data_msg)
@app.route('/api/v2/user/suggest/join', methods=['GET'])
def userSuggestJoin():
# join
data_msg = Suggest.join()
print(data_msg)
return BaseController().successData(data_msg)
@app.route('/api/v2/user/suggest/left', methods=['GET'])
def userSuggestLeft():
# left join
    # To get a right join, simply swap the two classes.
data_msg = Suggest.leftJoin()
return BaseController().successData(data_msg)
@app.route('/api/v2/document/upload', methods=['POST'])
def documentUpload():
""" 上传文件并验证
https://zhuanlan.zhihu.com/p/23731819?refer=flask
"""
files = request.files['document']
filename = secure_filename(files.filename)
if(files and Utils.allowed_file(filename)):
path = os.getcwd()+"/uploads/"+filename
files.save(path)
return BaseController().error('你成功走通了')
return BaseController().error('文件类型错误')
@app.route('/api/v2/document/upload/base64', methods=['post'])
def documentUploadBase64():
"""上传base64形式文件并杨峥
需要前端传入文件类型
"""
    # Validation rules for the nested (two-level) payload
rules = {
'userImgOne': {
'type': 'dict',
'schema': {
'imgBase64': {
'required': True,
'type': 'string',
'minlength': 2
},
'name': {
'required': True,
'type': 'string',
'minlength': 2
},
'size': {
'required': True,
'type': 'integer',
'minlength': 2
},
'type': {
'required': True,
'type': 'string',
'minlength': 2
}
}
},
'userImgTwo': {
'type': 'dict',
'schema': {
'imgBase64': {
'required': True,
'type': 'string',
'minlength': 2
},
'name': {
'required': True,
'type': 'string',
'minlength': 2
},
'size': {
'required': True,
'type': 'integer',
'minlength': 2
},
'type': {
'required': True,
'type': 'string',
'minlength': 2
}
}
},
'userImgThree': {
'type': 'dict',
'schema': {
'imgBase64': {
'required': True,
'type': 'string',
'minlength': 2
},
'name': {
'required': True,
'type': 'string',
'minlength': 2
},
'size': {
'required': True,
'type': 'integer',
'minlength': 2
},
'type': {
'required': True,
'type': 'string',
'minlength': 2
}
}
}
}
error_msg = {
'userImgOne': {
'type': 'dict',
'schema': {
'imgBase64': {
'required': u'图一是必须的',
'type': u'图一必须是字符串',
'minlength': u'图一字符最小是2'
},
'name': {
'required': u'图一是必须的',
'type': u'图一必须是字符串',
'minlength': u'图一字符最小是2'
},
'size': {
'required': u'图一是必须的',
'type': u'图一必须是字符串',
'minlength': u'图一字符最小是2'
},
'type': {
'required': u'图一是必须的',
'type': u'图一必须是字符串',
'minlength': u'图一字符最小是2'
}
}
},
'userImgTwo': {
'type': 'dict',
'schema': {
'imgBase64': {
'required': u'图二是必须的',
'type': u'图二必须是字符串',
'minlength': u'图二字符最小是2'
},
'name': {
'required': u'图二是必须的',
'type': u'图二必须是字符串',
'minlength': u'图二字符最小是2'
},
'size': {
'required': u'图二是必须的',
'type': u'图二必须是整数',
'minlength': u'图二字符最小是2'
},
'type': {
'required': u'图二是必须的',
'type': u'图二必须是字符串',
'minlength': u'图二字符最小是2'
}
}
},
'userImgThree': {
'type': 'dict',
'schema': {
'imgBase64': {
'required': u'图三是必须的',
'type': u'图三必须是字符串',
'minlength': u'图三字符最小是2'
},
'name': {
'required': u'图三是必须的',
'type': u'图三必须是字符串',
'minlength': u'图三字符最小是2'
},
'size': {
'required': u'图三是必须的',
'type': u'图三必须是整数',
'minlength': u'图三字符最小是2'
},
'type': {
'required': u'图三是必须的',
'type': u'图三必须是字符串',
'minlength': u'图三字符最小是2'
}
}
}
}
error = BaseController().validateInput(rules, error_msg)
if(error is not True):
return error
    # Image type and size checks should be added here as needed; omitted for brevity.
for(k, v) in request.json.items():
userImg = v['imgBase64'].split(',')[1]
imgdata = base64.b64decode(userImg)
path = os.getcwd()+"/uploads/"+Utils.unique_id()+'.jpg'
        with open(path, 'wb') as file:
            file.write(imgdata)
"""userImgOne = request.json.get('userImgOne')['imgBase64'].split(',')[1]
userImgTwo = request.json.get('userImgTwo')['imgBase64'].split(',')[1]
userImgThree = request.json.get('userImgThree')['imgBase64'].split(',')[1]
imgdata = base64.b64decode(userImgOne) """
return BaseController().successData(msg='图片提交成功')
@app.route('/api/v2/comments/get', methods=['post'])
def commentsGet():
rules = {
'pageNo': {
'required': True,
'type': 'integer'
},
'pageSize': {
'required': True,
'type': 'integer'
}
}
error_msg = {
'pageNo': {
'required': u'当前页是必须的',
'type': u'当前页必须是整数'
},
'pageSize': {
'required': u'当前页是必须的',
'type': u'当前页必须是整数'
}
}
error = BaseController().validateInput(rules, error_msg)
if(error is not True):
return error
pageNo = request.json.get('pageNo')
pageSize = request.json.get('pageSize')
data = Comments().getCommentsList(pageNo, pageSize)
return BaseController().json(data)
@app.route('/api/v2/imgShard/save', methods=['post'])
def imgShard():
""" 接收图片分片数据并存入数据库 """
rules = {
'index': {
'required': True,
'type': 'integer'
},
'uuid': {
'required': True,
'type': 'string'
},
'imgString': {
'required': True,
'type': 'string'
}
}
error_msg = {
'index': {
'required': u'图片索引是必须的',
'type': u'图片索引必须是字符串'
},
'uuid': {
'required': u'唯一id是必须的',
'type': u'唯一id必须是字符串'
},
'imgString': {
'required': u'当前页是必须的',
'type': u'当前页必须是字符串'
}
}
error = BaseController().validateInput(rules, error_msg)
if(error is not True):
return error
index = request.json.get('index')
uuid = request.json.get('uuid')
imgString = request.json.get('imgString')
data = ImgShard.add(index, uuid, imgString)
if data:
return BaseController().successData(data=0, msg='图片分片提交失败')
else:
return BaseController().successData(data=index, msg='图片分片提交成功')
@app.route('/api/v2/imgShard/switch', methods=['post'])
def imgSwitch():
""" 接收图片uuid并转换成图片 """
rules = {
'uuid': {
'required': True,
'type': 'string'
}
}
error_msg = {
'uuid': {
'required': u'唯一id是必须的',
'type': u'唯一id必须是字符串'
}
}
error = BaseController().validateInput(rules, error_msg)
if(error is not True):
return error
uuid = request.json.get('uuid')
data = ImgShard.getData(uuid)
base64Data = ''
for i in data:
base64Data = base64Data + i['imgString']
userImg = base64Data.split(',')[1]
imgdata = base64.b64decode(userImg)
rela_path = "/uploads/"+Utils.unique_id()+'.jpg'
path = os.getcwd()+rela_path
    with open(path, 'wb') as file:
        file.write(imgdata)
return BaseController().successData(data={"url": rela_path}, msg='图片提交成功')
| 29.239808
| 104
| 0.466743
|
759d43875fa3590b078f1e9b4ad53bef56645efa
| 365
|
py
|
Python
|
yatube/posts/migrations/0003_auto_20200207_1245.py
|
ashowlsky/yatube_subdomain
|
c15782caec647936f252e41bd54df5a62e9922d6
|
[
"MIT"
] | null | null | null |
yatube/posts/migrations/0003_auto_20200207_1245.py
|
ashowlsky/yatube_subdomain
|
c15782caec647936f252e41bd54df5a62e9922d6
|
[
"MIT"
] | null | null | null |
yatube/posts/migrations/0003_auto_20200207_1245.py
|
ashowlsky/yatube_subdomain
|
c15782caec647936f252e41bd54df5a62e9922d6
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2020-02-07 09:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20200204_2115'),
]
operations = [
migrations.AlterField(
model_name='post',
name='text',
field=models.TextField(),
),
]
| 19.210526
| 45
| 0.580822
|
a3c844d939edf1851abb54d5256b9c31c7ca9450
| 2,892
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripPanelRenderEventHandler.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripPanelRenderEventHandler.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripPanelRenderEventHandler.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class ToolStripPanelRenderEventHandler(MulticastDelegate,ICloneable,ISerializable):
"""
Represents the method that will handle System.Windows.Forms.ToolStripPanel paint events.
ToolStripPanelRenderEventHandler(object: object,method: IntPtr)
"""
def BeginInvoke(self,sender,e,callback,object):
""" BeginInvoke(self: ToolStripPanelRenderEventHandler,sender: object,e: ToolStripPanelRenderEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self,result):
""" EndInvoke(self: ToolStripPanelRenderEventHandler,result: IAsyncResult) """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,sender,e):
""" Invoke(self: ToolStripPanelRenderEventHandler,sender: object,e: ToolStripPanelRenderEventArgs) """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
| 30.442105
| 215
| 0.723721
|
54714864dc054d586aa0265f930b754ab2d95de8
| 113,943
|
py
|
Python
|
test/orm/test_froms.py
|
randallk/sqlalchemy
|
d8ac1e9e6bfc931d2f14f9846d6924106f56b7e6
|
[
"MIT"
] | null | null | null |
test/orm/test_froms.py
|
randallk/sqlalchemy
|
d8ac1e9e6bfc931d2f14f9846d6924106f56b7e6
|
[
"MIT"
] | null | null | null |
test/orm/test_froms.py
|
randallk/sqlalchemy
|
d8ac1e9e6bfc931d2f14f9846d6924106f56b7e6
|
[
"MIT"
] | null | null | null |
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import asc
from sqlalchemy import cast
from sqlalchemy import desc
from sqlalchemy import exc as sa_exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal_column
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.orm import aliased
from sqlalchemy.orm import backref
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import column_property
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import contains_alias
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import create_session
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relation
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm.util import join
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.schema import Column
from test.orm import _fixtures
class QueryTest(_fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
(
Node,
composite_pk_table,
users,
Keyword,
items,
Dingaling,
order_items,
item_keywords,
Item,
User,
dingalings,
Address,
keywords,
CompositePk,
nodes,
Order,
orders,
addresses,
) = (
cls.classes.Node,
cls.tables.composite_pk_table,
cls.tables.users,
cls.classes.Keyword,
cls.tables.items,
cls.classes.Dingaling,
cls.tables.order_items,
cls.tables.item_keywords,
cls.classes.Item,
cls.classes.User,
cls.tables.dingalings,
cls.classes.Address,
cls.tables.keywords,
cls.classes.CompositePk,
cls.tables.nodes,
cls.classes.Order,
cls.tables.orders,
cls.tables.addresses,
)
mapper(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", order_by=addresses.c.id
),
"orders": relationship(
Order, backref="user", order_by=orders.c.id
), # o2m, m2o
},
)
mapper(
Address,
addresses,
properties={
"dingaling": relationship(
Dingaling, uselist=False, backref="address"
) # o2o
},
)
mapper(Dingaling, dingalings)
mapper(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
), # m2m
"address": relationship(Address), # m2o
},
)
mapper(
Item,
items,
properties={
"keywords": relationship(Keyword, secondary=item_keywords)
},
) # m2m
mapper(Keyword, keywords)
mapper(
Node,
nodes,
properties={
"children": relationship(
Node, backref=backref("parent", remote_side=[nodes.c.id])
)
},
)
mapper(CompositePk, composite_pk_table)
configure_mappers()
class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
query_correlated = (
"SELECT users.name AS users_name, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
)
query_not_correlated = (
"SELECT users.name AS users_name, "
"(SELECT count(addresses.id) AS count_1 FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
)
def test_scalar_subquery_select_auto_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = select(
[func.count(addresses.c.id)], addresses.c.user_id == users.c.id
).scalar_subquery()
query = select([users.c.name.label("users_name"), query])
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_select_explicit_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(
[func.count(addresses.c.id)], addresses.c.user_id == users.c.id
)
.correlate(users)
.scalar_subquery()
)
query = select([users.c.name.label("users_name"), query])
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_select_correlate_off(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(
[func.count(addresses.c.id)], addresses.c.user_id == users.c.id
)
.correlate(None)
.scalar_subquery()
)
query = select([users.c.name.label("users_name"), query])
self.assert_compile(
query, self.query_not_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_query_auto_correlate(self):
sess = create_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_query_explicit_correlate(self):
sess = create_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.correlate(self.tables.users)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_query_correlate_off(self):
sess = create_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.correlate(None)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_not_correlated, dialect=default.DefaultDialect()
)
def test_correlate_to_union(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
q = sess.query(User).union(q)
u_alias = aliased(User)
raw_subq = exists().where(u_alias.id > User.id)
orm_subq = sess.query(u_alias).filter(u_alias.id > User.id).exists()
self.assert_compile(
q.add_column(raw_subq),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT * FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
# only difference is "1" vs. "*" (not sure why that is)
self.assert_compile(
q.add_column(orm_subq),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT 1 FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
class RawSelectTest(QueryTest, AssertsCompiledSQL):
"""compare a bunch of select() tests with the equivalent Query using
straight table/columns.
Results should be the same as Query should act as a select() pass-
thru for ClauseElement entities.
"""
__dialect__ = "default"
def test_select(self):
addresses, users = self.tables.addresses, self.tables.users
sess = create_session()
self.assert_compile(
sess.query(users)
.select_entity_from(users.select().subquery())
.with_labels()
.statement,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, "
"(SELECT users.id AS id, users.name AS name FROM users) AS anon_1",
)
self.assert_compile(
sess.query(users, exists([1], from_obj=addresses))
.with_labels()
.statement,
"SELECT users.id AS users_id, users.name AS users_name, EXISTS "
"(SELECT 1 FROM addresses) AS anon_1 FROM users",
)
# a little tedious here, adding labels to work around Query's
# auto-labelling.
s = (
sess.query(
addresses.c.id.label("id"),
addresses.c.email_address.label("email"),
)
.filter(addresses.c.user_id == users.c.id)
.correlate(users)
.statement.alias()
)
self.assert_compile(
sess.query(users, s.c.email)
.select_entity_from(users.join(s, s.c.id == users.c.id))
.with_labels()
.statement,
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email AS anon_1_email "
"FROM users JOIN (SELECT addresses.id AS id, "
"addresses.email_address AS email FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 "
"ON anon_1.id = users.id",
)
x = func.lala(users.c.id).label("foo")
self.assert_compile(
sess.query(x).filter(x == 5).statement,
"SELECT lala(users.id) AS foo FROM users WHERE "
"lala(users.id) = :param_1",
)
self.assert_compile(
sess.query(func.sum(x).label("bar")).statement,
"SELECT sum(lala(users.id)) AS bar FROM users",
)
class FromSelfTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_filter(self):
User = self.classes.User
eq_(
[User(id=8), User(id=9)],
create_session()
.query(User)
.filter(User.id.in_([8, 9]))
.from_self()
.all(),
)
eq_(
[User(id=8), User(id=9)],
create_session()
.query(User)
.order_by(User.id)
.slice(1, 3)
.from_self()
.all(),
)
eq_(
[User(id=8)],
list(
create_session()
.query(User)
.filter(User.id.in_([8, 9]))
.from_self()
.order_by(User.id)[0:1]
),
)
def test_join(self):
User, Address = self.classes.User, self.classes.Address
eq_(
[
(User(id=8), Address(id=2)),
(User(id=8), Address(id=3)),
(User(id=8), Address(id=4)),
(User(id=9), Address(id=5)),
],
create_session()
.query(User)
.filter(User.id.in_([8, 9]))
.from_self()
.join("addresses")
.add_entity(Address)
.order_by(User.id, Address.id)
.all(),
)
def test_group_by(self):
Address = self.classes.Address
eq_(
create_session()
.query(Address.user_id, func.count(Address.id).label("count"))
.group_by(Address.user_id)
.order_by(Address.user_id)
.all(),
[(7, 1), (8, 3), (9, 1)],
)
eq_(
create_session()
.query(Address.user_id, Address.id)
.from_self(Address.user_id, func.count(Address.id))
.group_by(Address.user_id)
.order_by(Address.user_id)
.all(),
[(7, 1), (8, 3), (9, 1)],
)
def test_having(self):
User = self.classes.User
s = create_session()
self.assert_compile(
s.query(User.id).group_by(User.id).having(User.id > 5).from_self(),
"SELECT anon_1.users_id AS anon_1_users_id FROM "
"(SELECT users.id AS users_id FROM users GROUP "
"BY users.id HAVING users.id > :id_1) AS anon_1",
)
def test_no_joinedload(self):
"""test that joinedloads are pushed outwards and not rendered in
subqueries."""
User = self.classes.User
s = create_session()
self.assert_compile(
s.query(User)
.options(joinedload(User.addresses))
.from_self()
.statement,
"SELECT anon_1.users_id, anon_1.users_name, addresses_1.id, "
"addresses_1.user_id, addresses_1.email_address FROM "
"(SELECT users.id AS users_id, users.name AS "
"users_name FROM users) AS anon_1 LEFT OUTER JOIN "
"addresses AS addresses_1 ON anon_1.users_id = "
"addresses_1.user_id ORDER BY addresses_1.id",
)
def test_aliases(self):
"""test that aliased objects are accessible externally to a from_self()
call."""
User, Address = self.classes.User, self.classes.Address
s = create_session()
ualias = aliased(User)
eq_(
s.query(User, ualias)
.filter(User.id > ualias.id)
.from_self(User.name, ualias.name)
.order_by(User.name, ualias.name)
.all(),
[
("chuck", "ed"),
("chuck", "fred"),
("chuck", "jack"),
("ed", "jack"),
("fred", "ed"),
("fred", "jack"),
],
)
eq_(
s.query(User, ualias)
.filter(User.id > ualias.id)
.from_self(User.name, ualias.name)
.filter(ualias.name == "ed")
.order_by(User.name, ualias.name)
.all(),
[("chuck", "ed"), ("fred", "ed")],
)
eq_(
s.query(User, ualias)
.filter(User.id > ualias.id)
.from_self(ualias.name, Address.email_address)
.join(ualias.addresses)
.order_by(ualias.name, Address.email_address)
.all(),
[
("ed", "fred@fred.com"),
("jack", "ed@bettyboop.com"),
("jack", "ed@lala.com"),
("jack", "ed@wood.com"),
("jack", "fred@fred.com"),
],
)
def test_multiple_entities(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
eq_(
sess.query(User, Address)
.filter(User.id == Address.user_id)
.filter(Address.id.in_([2, 5]))
.from_self()
.all(),
[(User(id=8), Address(id=2)), (User(id=9), Address(id=5))],
)
eq_(
sess.query(User, Address)
.filter(User.id == Address.user_id)
.filter(Address.id.in_([2, 5]))
.from_self()
.options(joinedload("addresses"))
.first(),
(
User(id=8, addresses=[Address(), Address(), Address()]),
Address(id=2),
),
)
def test_multiple_with_column_entities(self):
User = self.classes.User
sess = create_session()
eq_(
sess.query(User.id)
.from_self()
.add_column(func.count().label("foo"))
.group_by(User.id)
.order_by(User.id)
.from_self()
.all(),
[(7, 1), (8, 1), (9, 1), (10, 1)],
)
class ColumnAccessTest(QueryTest, AssertsCompiledSQL):
"""test access of columns after _from_selectable has been applied"""
__dialect__ = "default"
def test_from_self(self):
User = self.classes.User
sess = create_session()
q = sess.query(User).from_self()
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
"anon_1_users_name FROM (SELECT users.id AS users_id, users.name "
"AS users_name FROM users) AS anon_1 WHERE anon_1.users_name = "
":name_1",
)
def test_from_self_twice(self):
User = self.classes.User
sess = create_session()
q = sess.query(User).from_self(User.id, User.name).from_self()
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT anon_1.anon_2_users_id AS anon_1_anon_2_users_id, "
"anon_1.anon_2_users_name AS anon_1_anon_2_users_name FROM "
"(SELECT anon_2.users_id AS anon_2_users_id, anon_2.users_name "
"AS anon_2_users_name FROM (SELECT users.id AS users_id, "
"users.name AS users_name FROM users) AS anon_2) AS anon_1 "
"WHERE anon_1.anon_2_users_name = :name_1",
)
def test_select_entity_from(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
q = sess.query(User).select_entity_from(q.statement.subquery())
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name "
"FROM (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE anon_1.name = :name_1",
)
def test_select_entity_from_no_entities(self):
User = self.classes.User
sess = create_session()
assert_raises_message(
sa.exc.ArgumentError,
r"A selectable \(FromClause\) instance is "
"expected when the base alias is being set",
sess.query(User).select_entity_from,
User,
)
def test_select_from_no_aliasing(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
q = sess.query(User).select_from(q.statement.subquery())
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE users.name = :name_1",
)
def test_anonymous_expression(self):
from sqlalchemy.sql import column
sess = create_session()
c1, c2 = column("c1"), column("c2")
q1 = sess.query(c1, c2).filter(c1 == "dog")
q2 = sess.query(c1, c2).filter(c1 == "cat")
q3 = q1.union(q2)
self.assert_compile(
q3.order_by(c1),
"SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
"AS anon_1_c2 FROM (SELECT c1, c2 WHERE "
"c1 = :c1_1 UNION SELECT c1, c2 "
"WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1",
)
def test_anonymous_expression_from_self_twice(self):
from sqlalchemy.sql import column
sess = create_session()
c1, c2 = column("c1"), column("c2")
q1 = sess.query(c1, c2).filter(c1 == "dog")
q1 = q1.from_self().from_self()
self.assert_compile(
q1.order_by(c1),
"SELECT anon_1.anon_2_c1 AS anon_1_anon_2_c1, anon_1.anon_2_c2 AS "
"anon_1_anon_2_c2 FROM (SELECT anon_2.c1 AS anon_2_c1, anon_2.c2 "
"AS anon_2_c2 "
"FROM (SELECT c1, c2 WHERE c1 = :c1_1) AS "
"anon_2) AS anon_1 ORDER BY anon_1.anon_2_c1",
)
def test_anonymous_expression_union(self):
from sqlalchemy.sql import column
sess = create_session()
c1, c2 = column("c1"), column("c2")
q1 = sess.query(c1, c2).filter(c1 == "dog")
q2 = sess.query(c1, c2).filter(c1 == "cat")
q3 = q1.union(q2)
self.assert_compile(
q3.order_by(c1),
"SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
"AS anon_1_c2 FROM (SELECT c1, c2 WHERE "
"c1 = :c1_1 UNION SELECT c1, c2 "
"WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1",
)
def test_table_anonymous_expression_from_self_twice(self):
from sqlalchemy.sql import column
sess = create_session()
t1 = table("t1", column("c1"), column("c2"))
q1 = sess.query(t1.c.c1, t1.c.c2).filter(t1.c.c1 == "dog")
q1 = q1.from_self().from_self()
self.assert_compile(
q1.order_by(t1.c.c1),
"SELECT anon_1.anon_2_t1_c1 "
"AS anon_1_anon_2_t1_c1, anon_1.anon_2_t1_c2 "
"AS anon_1_anon_2_t1_c2 "
"FROM (SELECT anon_2.t1_c1 AS anon_2_t1_c1, "
"anon_2.t1_c2 AS anon_2_t1_c2 FROM (SELECT t1.c1 AS t1_c1, t1.c2 "
"AS t1_c2 FROM t1 WHERE t1.c1 = :c1_1) AS anon_2) AS anon_1 "
"ORDER BY anon_1.anon_2_t1_c1",
)
def test_anonymous_labeled_expression(self):
sess = create_session()
c1, c2 = column("c1"), column("c2")
q1 = sess.query(c1.label("foo"), c2.label("bar")).filter(c1 == "dog")
q2 = sess.query(c1.label("foo"), c2.label("bar")).filter(c1 == "cat")
q3 = q1.union(q2)
self.assert_compile(
q3.order_by(c1),
"SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM "
"(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT "
"c1 AS foo, c2 AS bar "
"WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.foo",
)
def test_anonymous_expression_plus_aliased_join(self):
"""test that the 'dont alias non-ORM' rule remains for other
kinds of aliasing when _from_selectable() is used."""
User = self.classes.User
Address = self.classes.Address
addresses = self.tables.addresses
sess = create_session()
q1 = sess.query(User.id).filter(User.id > 5)
q1 = q1.from_self()
q1 = q1.join(User.addresses, aliased=True).order_by(
User.id, Address.id, addresses.c.id
)
self.assert_compile(
q1,
"SELECT anon_1.users_id AS anon_1_users_id "
"FROM (SELECT users.id AS users_id FROM users "
"WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_id, addresses_1.id, addresses.id",
)
class AddEntityEquivalenceTest(fixtures.MappedTest, AssertsCompiledSQL):
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("type", String(20)),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("type", String(20)),
)
Table(
"c",
metadata,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
Column("age", Integer),
)
Table(
"d",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("dede", Integer),
)
@classmethod
def setup_classes(cls):
a, c, b, d = (cls.tables.a, cls.tables.c, cls.tables.b, cls.tables.d)
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
class C(B):
pass
class D(A):
pass
mapper(
A,
a,
polymorphic_identity="a",
polymorphic_on=a.c.type,
with_polymorphic=("*", None),
properties={"link": relation(B, uselist=False, backref="back")},
)
mapper(
B,
b,
polymorphic_identity="b",
polymorphic_on=b.c.type,
with_polymorphic=("*", None),
)
mapper(C, c, inherits=B, polymorphic_identity="c")
mapper(D, d, inherits=A, polymorphic_identity="d")
@classmethod
def insert_data(cls):
A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B)
sess = create_session()
sess.add_all(
[
B(name="b1"),
A(name="a1", link=C(name="c1", age=3)),
C(name="c2", age=6),
A(name="a2"),
]
)
sess.flush()
def test_add_entity_equivalence(self):
A, C, B = (self.classes.A, self.classes.C, self.classes.B)
sess = create_session()
for q in [
sess.query(A, B).join(A.link),
sess.query(A).join(A.link).add_entity(B),
]:
eq_(
q.all(),
[
(
A(bid=2, id=1, name="a1", type="a"),
C(age=3, id=2, name="c1", type="c"),
)
],
)
for q in [
sess.query(B, A).join(B.back),
sess.query(B).join(B.back).add_entity(A),
sess.query(B).add_entity(A).join(B.back),
]:
eq_(
q.all(),
[
(
C(age=3, id=2, name="c1", type="c"),
A(bid=2, id=1, name="a1", type="a"),
)
],
)
class InstancesTest(QueryTest, AssertsCompiledSQL):
def test_from_alias_two(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select(
use_labels=True, order_by=[text("ulist.id"), addresses.c.id]
)
)
sess = create_session()
q = sess.query(User)
def go():
result = (
q.options(contains_alias("ulist"), contains_eager("addresses"))
.from_statement(query)
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_three(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select(
use_labels=True, order_by=[text("ulist.id"), addresses.c.id]
)
)
sess = create_session()
        # better way: use select_entity_from()
def go():
result = (
sess.query(User)
.select_entity_from(query.subquery())
.options(contains_eager("addresses"))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_four(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
sess = create_session()
# same thing, but alias addresses, so that the adapter
# generated by select_entity_from() is wrapped within
# the adapter created by contains_eager()
adalias = addresses.alias()
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(adalias)
.select(use_labels=True, order_by=[text("ulist.id"), adalias.c.id])
)
def go():
result = (
sess.query(User)
.select_entity_from(query.subquery())
.options(contains_eager("addresses", alias=adalias))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_one(self):
addresses, User = (self.tables.addresses, self.classes.User)
sess = create_session()
# test that contains_eager suppresses the normal outer join rendering
q = (
sess.query(User)
.outerjoin(User.addresses)
.options(contains_eager(User.addresses))
.order_by(User.id, addresses.c.id)
)
self.assert_compile(
q.with_labels().statement,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS "
"addresses_email_address, users.id AS "
"users_id, users.name AS users_name FROM "
"users LEFT OUTER JOIN addresses ON "
"users.id = addresses.user_id ORDER BY "
"users.id, addresses.id",
dialect=default.DefaultDialect(),
)
def go():
assert self.static.user_address_result == q.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_two(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = create_session()
adalias = addresses.alias()
q = (
sess.query(User)
.select_entity_from(users.outerjoin(adalias))
.options(contains_eager(User.addresses, alias=adalias))
.order_by(User.id, adalias.c.id)
)
def go():
eq_(self.static.user_address_result, q.all())
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_four(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = create_session()
selectquery = users.outerjoin(addresses).select(
users.c.id < 10,
use_labels=True,
order_by=[users.c.id, addresses.c.id],
)
q = sess.query(User)
def go():
result = (
q.options(contains_eager("addresses"))
.from_statement(selectquery)
.all()
)
assert self.static.user_address_result[0:3] == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_aliased(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = sess.query(User)
# Aliased object
adalias = aliased(Address)
def go():
result = (
q.options(contains_eager("addresses", alias=adalias))
.outerjoin(adalias, User.addresses)
.order_by(User.id, adalias.id)
)
assert self.static.user_address_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_alias(self):
orders, items, users, order_items, User = (
self.tables.orders,
self.tables.items,
self.tables.users,
self.tables.order_items,
self.classes.User,
)
sess = create_session()
q = sess.query(User)
oalias = orders.alias("o1")
ialias = items.alias("i1")
query = (
users.outerjoin(oalias)
.outerjoin(order_items)
.outerjoin(ialias)
.select(use_labels=True)
.order_by(users.c.id, oalias.c.id, ialias.c.id)
)
# test using Alias with more than one level deep
# new way:
# from sqlalchemy.orm.strategy_options import Load
        # opt = Load(User).contains_eager('orders', alias=oalias).\
        #     contains_eager('items', alias=ialias)
def go():
result = list(
q.options(
contains_eager("orders", alias=oalias),
contains_eager("orders.items", alias=ialias),
).from_statement(query)
)
assert self.static.user_order_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_aliased(self):
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = create_session()
q = sess.query(User)
# test using Aliased with more than one level deep
oalias = aliased(Order)
ialias = aliased(Item)
def go():
result = (
q.options(
contains_eager(User.orders, alias=oalias),
contains_eager(User.orders, Order.items, alias=ialias),
)
.outerjoin(oalias, User.orders)
.outerjoin(ialias, oalias.items)
.order_by(User.id, oalias.id, ialias.id)
)
assert self.static.user_order_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_aliased_of_type(self):
# test newer style that does not use the alias parameter
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = create_session()
q = sess.query(User)
# test using Aliased with more than one level deep
oalias = aliased(Order)
ialias = aliased(Item)
def go():
result = (
q.options(
contains_eager(User.orders.of_type(oalias)).contains_eager(
oalias.items.of_type(ialias)
)
)
.outerjoin(User.orders.of_type(oalias))
.outerjoin(oalias.items.of_type(ialias))
.order_by(User.id, oalias.id, ialias.id)
)
assert self.static.user_order_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_chaining(self):
"""test that contains_eager() 'chains' by default."""
Dingaling, User, Address = (
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = create_session()
q = (
sess.query(User)
.join(User.addresses)
.join(Address.dingaling)
.options(contains_eager(User.addresses, Address.dingaling))
)
def go():
eq_(
q.all(),
# note we only load the Address records that
# have a Dingaling here due to using the inner
# join for the eager load
[
User(
name="ed",
addresses=[
Address(
email_address="ed@wood.com",
dingaling=Dingaling(data="ding 1/2"),
)
],
),
User(
name="fred",
addresses=[
Address(
email_address="fred@fred.com",
dingaling=Dingaling(data="ding 2/5"),
)
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_chaining_aliased_endpoint(self):
"""test that contains_eager() 'chains' by default and supports
an alias at the end."""
Dingaling, User, Address = (
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = create_session()
da = aliased(Dingaling, name="foob")
q = (
sess.query(User)
.join(User.addresses)
.join(da, Address.dingaling)
.options(
contains_eager(User.addresses, Address.dingaling, alias=da)
)
)
def go():
eq_(
q.all(),
# note we only load the Address records that
# have a Dingaling here due to using the inner
# join for the eager load
[
User(
name="ed",
addresses=[
Address(
email_address="ed@wood.com",
dingaling=Dingaling(data="ding 1/2"),
)
],
),
User(
name="fred",
addresses=[
Address(
email_address="fred@fred.com",
dingaling=Dingaling(data="ding 2/5"),
)
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
def test_mixed_eager_contains_with_limit(self):
Order, User, Address = (
self.classes.Order,
self.classes.User,
self.classes.Address,
)
sess = create_session()
q = sess.query(User)
def go():
# outerjoin to User.orders, offset 1/limit 2 so we get user
# 7 + second two orders. then joinedload the addresses.
# User + Order columns go into the subquery, address left
# outer joins to the subquery, joinedloader for User.orders
# applies context.adapter to result rows. This was
# [ticket:1180].
result = (
q.outerjoin(User.orders)
.options(
joinedload(User.addresses), contains_eager(User.orders)
)
.order_by(User.id, Order.id)
.offset(1)
.limit(2)
.all()
)
eq_(
result,
[
User(
id=7,
addresses=[
Address(
email_address="jack@bean.com", user_id=7, id=1
)
],
name="jack",
orders=[
Order(
address_id=1,
user_id=7,
description="order 3",
isopen=1,
id=3,
),
Order(
address_id=None,
user_id=7,
description="order 5",
isopen=0,
id=5,
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
# same as above, except Order is aliased, so two adapters
# are applied by the eager loader
oalias = aliased(Order)
result = (
q.outerjoin(oalias, User.orders)
.options(
joinedload(User.addresses),
contains_eager(User.orders, alias=oalias),
)
.order_by(User.id, oalias.id)
.offset(1)
.limit(2)
.all()
)
eq_(
result,
[
User(
id=7,
addresses=[
Address(
email_address="jack@bean.com", user_id=7, id=1
)
],
name="jack",
orders=[
Order(
address_id=1,
user_id=7,
description="order 3",
isopen=1,
id=3,
),
Order(
address_id=None,
user_id=7,
description="order 5",
isopen=0,
id=5,
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
class MixedEntitiesTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_values(self):
Address, users, User = (
self.classes.Address,
self.tables.users,
self.classes.User,
)
sess = create_session()
assert list(sess.query(User).values()) == list()
sel = users.select(User.id.in_([7, 8])).alias()
q = sess.query(User)
q2 = q.select_entity_from(sel).values(User.name)
eq_(list(q2), [("jack",), ("ed",)])
q = sess.query(User)
q2 = q.order_by(User.id).values(
User.name, User.name + " " + cast(User.id, String(50))
)
eq_(
list(q2),
[
("jack", "jack 7"),
("ed", "ed 8"),
("fred", "fred 9"),
("chuck", "chuck 10"),
],
)
q2 = (
q.join("addresses")
.filter(User.name.like("%e%"))
.order_by(User.id, Address.id)
.values(User.name, Address.email_address)
)
eq_(
list(q2),
[
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("fred", "fred@fred.com"),
],
)
q2 = (
q.join("addresses")
.filter(User.name.like("%e%"))
.order_by(desc(Address.email_address))
.slice(1, 3)
.values(User.name, Address.email_address)
)
eq_(list(q2), [("ed", "ed@wood.com"), ("ed", "ed@lala.com")])
adalias = aliased(Address)
q2 = (
q.join(adalias, "addresses")
.filter(User.name.like("%e%"))
.order_by(adalias.email_address)
.values(User.name, adalias.email_address)
)
eq_(
list(q2),
[
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("ed", "ed@wood.com"),
("fred", "fred@fred.com"),
],
)
q2 = q.values(func.count(User.name))
assert next(q2) == (4,)
q2 = (
q.select_entity_from(sel)
.filter(User.id == 8)
.values(User.name, sel.c.name, User.name)
)
eq_(list(q2), [("ed", "ed", "ed")])
        # using User.xxx is aliased against "sel", so this query returns nothing
q2 = (
q.select_entity_from(sel)
.filter(User.id == 8)
.filter(User.id > sel.c.id)
.values(User.name, sel.c.name, User.name)
)
eq_(list(q2), [])
        # whereas this uses users.c.xxx, which is not aliased and creates a new join
q2 = (
q.select_entity_from(sel)
.filter(users.c.id == 8)
.filter(users.c.id > sel.c.id)
.values(users.c.name, sel.c.name, User.name)
)
eq_(list(q2), [("ed", "jack", "jack")])
def test_alias_naming(self):
User = self.classes.User
sess = create_session()
ua = aliased(User, name="foobar")
q = sess.query(ua)
self.assert_compile(
q,
"SELECT foobar.id AS foobar_id, "
"foobar.name AS foobar_name FROM users AS foobar",
)
@testing.fails_on("mssql", "FIXME: unknown")
def test_values_specific_order_by(self):
users, User = self.tables.users, self.classes.User
sess = create_session()
assert list(sess.query(User).values()) == list()
sel = users.select(User.id.in_([7, 8])).alias()
q = sess.query(User)
u2 = aliased(User)
q2 = (
q.select_entity_from(sel)
.filter(u2.id > 1)
.order_by(User.id, sel.c.id, u2.id)
.values(User.name, sel.c.name, u2.name)
)
eq_(
list(q2),
[
("jack", "jack", "jack"),
("jack", "jack", "ed"),
("jack", "jack", "fred"),
("jack", "jack", "chuck"),
("ed", "ed", "jack"),
("ed", "ed", "ed"),
("ed", "ed", "fred"),
("ed", "ed", "chuck"),
],
)
@testing.fails_on("mssql", "FIXME: unknown")
@testing.fails_on(
"oracle", "Oracle doesn't support boolean expressions as " "columns"
)
@testing.fails_on(
"postgresql+pg8000",
"pg8000 parses the SQL itself before passing on "
"to PG, doesn't parse this",
)
@testing.fails_on("firebird", "unknown")
def test_values_with_boolean_selects(self):
"""Tests a values clause that works with select boolean
evaluations"""
User = self.classes.User
sess = create_session()
q = sess.query(User)
q2 = (
q.group_by(User.name.like("%j%"))
.order_by(desc(User.name.like("%j%")))
.values(User.name.like("%j%"), func.count(User.name.like("%j%")))
)
eq_(list(q2), [(True, 1), (False, 3)])
q2 = q.order_by(desc(User.name.like("%j%"))).values(
User.name.like("%j%")
)
eq_(list(q2), [(True,), (False,), (False,), (False,)])
def test_correlated_subquery(self):
"""test that a subquery constructed from ORM attributes doesn't leak
out those entities to the outermost query."""
Address, users, User = (
self.classes.Address,
self.tables.users,
self.classes.User,
)
sess = create_session()
subq = (
select([func.count()])
.where(User.id == Address.user_id)
.correlate(users)
.label("count")
)
# we don't want Address to be outside of the subquery here
eq_(
list(sess.query(User, subq)[0:3]),
[
(User(id=7, name="jack"), 1),
(User(id=8, name="ed"), 3),
(User(id=9, name="fred"), 1),
],
)
# same thing without the correlate, as it should
# not be needed
subq = (
select([func.count()])
.where(User.id == Address.user_id)
.label("count")
)
# we don't want Address to be outside of the subquery here
eq_(
list(sess.query(User, subq)[0:3]),
[
(User(id=7, name="jack"), 1),
(User(id=8, name="ed"), 3),
(User(id=9, name="fred"), 1),
],
)
def test_column_queries(self):
Address, users, User = (
self.classes.Address,
self.tables.users,
self.classes.User,
)
sess = create_session()
eq_(
sess.query(User.name).all(),
[("jack",), ("ed",), ("fred",), ("chuck",)],
)
sel = users.select(User.id.in_([7, 8])).alias()
q = sess.query(User.name)
q2 = q.select_entity_from(sel).all()
eq_(list(q2), [("jack",), ("ed",)])
eq_(
sess.query(User.name, Address.email_address)
.filter(User.id == Address.user_id)
.all(),
[
("jack", "jack@bean.com"),
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("fred", "fred@fred.com"),
],
)
eq_(
sess.query(User.name, func.count(Address.email_address))
.outerjoin(User.addresses)
.group_by(User.id, User.name)
.order_by(User.id)
.all(),
[("jack", 1), ("ed", 3), ("fred", 1), ("chuck", 0)],
)
eq_(
sess.query(User, func.count(Address.email_address))
.outerjoin(User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(User(name="jack", id=7), 1),
(User(name="ed", id=8), 3),
(User(name="fred", id=9), 1),
(User(name="chuck", id=10), 0),
],
)
eq_(
sess.query(func.count(Address.email_address), User)
.outerjoin(User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(1, User(name="jack", id=7)),
(3, User(name="ed", id=8)),
(1, User(name="fred", id=9)),
(0, User(name="chuck", id=10)),
],
)
adalias = aliased(Address)
eq_(
sess.query(User, func.count(adalias.email_address))
.outerjoin(adalias, "addresses")
.group_by(User)
.order_by(User.id)
.all(),
[
(User(name="jack", id=7), 1),
(User(name="ed", id=8), 3),
(User(name="fred", id=9), 1),
(User(name="chuck", id=10), 0),
],
)
eq_(
sess.query(func.count(adalias.email_address), User)
.outerjoin(adalias, User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(1, User(name="jack", id=7)),
(3, User(name="ed", id=8)),
(1, User(name="fred", id=9)),
(0, User(name="chuck", id=10)),
],
)
# select from aliasing + explicit aliasing
eq_(
sess.query(User, adalias.email_address, adalias.id)
.outerjoin(adalias, User.addresses)
.from_self(User, adalias.email_address)
.order_by(User.id, adalias.id)
.all(),
[
(User(name="jack", id=7), "jack@bean.com"),
(User(name="ed", id=8), "ed@wood.com"),
(User(name="ed", id=8), "ed@bettyboop.com"),
(User(name="ed", id=8), "ed@lala.com"),
(User(name="fred", id=9), "fred@fred.com"),
(User(name="chuck", id=10), None),
],
)
# anon + select from aliasing
eq_(
sess.query(User)
.join(User.addresses, aliased=True)
.filter(Address.email_address.like("%ed%"))
.from_self()
.all(),
[User(name="ed", id=8), User(name="fred", id=9)],
)
# test eager aliasing, with/without select_entity_from aliasing
for q in [
sess.query(User, adalias.email_address)
.outerjoin(adalias, User.addresses)
.options(joinedload(User.addresses))
.order_by(User.id, adalias.id)
.limit(10),
sess.query(User, adalias.email_address, adalias.id)
.outerjoin(adalias, User.addresses)
.from_self(User, adalias.email_address)
.options(joinedload(User.addresses))
.order_by(User.id, adalias.id)
.limit(10),
]:
eq_(
q.all(),
[
(
User(
addresses=[
Address(
user_id=7,
email_address="jack@bean.com",
id=1,
)
],
name="jack",
id=7,
),
"jack@bean.com",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="ed@wood.com",
id=2,
),
Address(
user_id=8,
email_address="ed@bettyboop.com",
id=3,
),
Address(
user_id=8,
email_address="ed@lala.com",
id=4,
),
],
name="ed",
id=8,
),
"ed@wood.com",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="ed@wood.com",
id=2,
),
Address(
user_id=8,
email_address="ed@bettyboop.com",
id=3,
),
Address(
user_id=8,
email_address="ed@lala.com",
id=4,
),
],
name="ed",
id=8,
),
"ed@bettyboop.com",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="ed@wood.com",
id=2,
),
Address(
user_id=8,
email_address="ed@bettyboop.com",
id=3,
),
Address(
user_id=8,
email_address="ed@lala.com",
id=4,
),
],
name="ed",
id=8,
),
"ed@lala.com",
),
(
User(
addresses=[
Address(
user_id=9,
email_address="fred@fred.com",
id=5,
)
],
name="fred",
id=9,
),
"fred@fred.com",
),
(User(addresses=[], name="chuck", id=10), None),
],
)
def test_column_from_limited_joinedload(self):
User = self.classes.User
sess = create_session()
def go():
results = (
sess.query(User)
.limit(1)
.options(joinedload("addresses"))
.add_column(User.name)
.all()
)
eq_(results, [(User(name="jack"), "jack")])
self.assert_sql_count(testing.db, go, 1)
@testing.fails_on("firebird", "unknown")
def test_self_referential(self):
Order = self.classes.Order
sess = create_session()
oalias = aliased(Order)
for q in [
sess.query(Order, oalias)
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.order_by(Order.id, oalias.id),
sess.query(Order, oalias)
.from_self()
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.order_by(Order.id, oalias.id),
# same thing, but reversed.
sess.query(oalias, Order)
.from_self()
.filter(oalias.user_id == Order.user_id)
.filter(oalias.user_id == 7)
.filter(Order.id < oalias.id)
.order_by(oalias.id, Order.id),
# here we go....two layers of aliasing
sess.query(Order, oalias)
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.from_self()
.order_by(Order.id, oalias.id)
.limit(10)
.options(joinedload(Order.items)),
# gratuitous four layers
sess.query(Order, oalias)
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.from_self()
.from_self()
.from_self()
.order_by(Order.id, oalias.id)
.limit(10)
.options(joinedload(Order.items)),
]:
eq_(
q.all(),
[
(
Order(
address_id=1,
description="order 3",
isopen=1,
user_id=7,
id=3,
),
Order(
address_id=1,
description="order 1",
isopen=0,
user_id=7,
id=1,
),
),
(
Order(
address_id=None,
description="order 5",
isopen=0,
user_id=7,
id=5,
),
Order(
address_id=1,
description="order 1",
isopen=0,
user_id=7,
id=1,
),
),
(
Order(
address_id=None,
description="order 5",
isopen=0,
user_id=7,
id=5,
),
Order(
address_id=1,
description="order 3",
isopen=1,
user_id=7,
id=3,
),
),
],
)
# ensure column expressions are taken from inside the subquery, not
# restated at the top
q = (
sess.query(
Order.id, Order.description, literal_column("'q'").label("foo")
)
.filter(Order.description == "order 3")
.from_self()
)
self.assert_compile(
q,
"SELECT anon_1.orders_id AS "
"anon_1_orders_id, anon_1.orders_descriptio"
"n AS anon_1_orders_description, "
"anon_1.foo AS anon_1_foo FROM (SELECT "
"orders.id AS orders_id, "
"orders.description AS orders_description, "
"'q' AS foo FROM orders WHERE "
"orders.description = :description_1) AS "
"anon_1",
)
eq_(q.all(), [(3, "order 3", "q")])
def test_multi_mappers(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
test_session = create_session()
(user7, user8, user9, user10) = test_session.query(User).all()
(
address1,
address2,
address3,
address4,
address5,
) = test_session.query(Address).all()
expected = [
(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None),
]
sess = create_session()
selectquery = users.outerjoin(addresses).select(
use_labels=True, order_by=[users.c.id, addresses.c.id]
)
eq_(
list(sess.query(User, Address).from_statement(selectquery)),
expected,
)
sess.expunge_all()
for address_entity in (Address, aliased(Address)):
q = (
sess.query(User)
.add_entity(address_entity)
.outerjoin(address_entity, "addresses")
.order_by(User.id, address_entity.id)
)
eq_(q.all(), expected)
sess.expunge_all()
q = sess.query(User).add_entity(address_entity)
q = q.join(address_entity, "addresses")
q = q.filter_by(email_address="ed@bettyboop.com")
eq_(q.all(), [(user8, address3)])
sess.expunge_all()
q = (
sess.query(User, address_entity)
.join(address_entity, "addresses")
.filter_by(email_address="ed@bettyboop.com")
)
eq_(q.all(), [(user8, address3)])
sess.expunge_all()
q = (
sess.query(User, address_entity)
.join(address_entity, "addresses")
.options(joinedload("addresses"))
.filter_by(email_address="ed@bettyboop.com")
)
eq_(list(util.OrderedSet(q.all())), [(user8, address3)])
sess.expunge_all()
def test_aliased_multi_mappers(self):
User, addresses, users, Address = (
self.classes.User,
self.tables.addresses,
self.tables.users,
self.classes.Address,
)
sess = create_session()
(user7, user8, user9, user10) = sess.query(User).all()
(address1, address2, address3, address4, address5) = sess.query(
Address
).all()
expected = [
(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None),
]
q = sess.query(User)
adalias = addresses.alias("adalias")
q = q.add_entity(Address, alias=adalias).select_entity_from(
users.outerjoin(adalias)
)
result = q.order_by(User.id, adalias.c.id).all()
assert result == expected
sess.expunge_all()
q = sess.query(User).add_entity(Address, alias=adalias)
result = (
q.select_entity_from(users.outerjoin(adalias))
.filter(adalias.c.email_address == "ed@bettyboop.com")
.all()
)
assert result == [(user8, address3)]
def test_with_entities(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = sess.query(User).filter(User.id == 7).order_by(User.name)
self.assert_compile(
q.with_entities(User.id, Address).filter(
Address.user_id == User.id
),
"SELECT users.id AS users_id, addresses.id "
"AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address"
" AS addresses_email_address FROM users, "
"addresses WHERE users.id = :id_1 AND "
"addresses.user_id = users.id ORDER BY "
"users.name",
)
def test_multi_columns(self):
users, User = self.tables.users, self.classes.User
sess = create_session()
expected = [(u, u.name) for u in sess.query(User).all()]
for add_col in (User.name, users.c.name):
assert sess.query(User).add_column(add_col).all() == expected
sess.expunge_all()
assert_raises(
sa_exc.ArgumentError, sess.query(User).add_column, object()
)
def test_add_multi_columns(self):
"""test that add_column accepts a FROM clause."""
users, User = self.tables.users, self.classes.User
sess = create_session()
eq_(
sess.query(User.id).add_column(users).all(),
[(7, 7, "jack"), (8, 8, "ed"), (9, 9, "fred"), (10, 10, "chuck")],
)
def test_multi_columns_2(self):
"""test aliased/nonalised joins with the usage of add_column()"""
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
sess = create_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [(user7, 1), (user8, 3), (user9, 1), (user10, 0)]
q = sess.query(User)
q = (
q.group_by(users)
.order_by(User.id)
.outerjoin("addresses")
.add_column(func.count(Address.id).label("count"))
)
eq_(q.all(), expected)
sess.expunge_all()
adalias = aliased(Address)
q = sess.query(User)
q = (
q.group_by(users)
.order_by(User.id)
.outerjoin(adalias, "addresses")
.add_column(func.count(adalias.id).label("count"))
)
eq_(q.all(), expected)
sess.expunge_all()
# TODO: figure out why group_by(users) doesn't work here
count = func.count(addresses.c.id).label("count")
s = (
select([users, count])
.select_from(users.outerjoin(addresses))
.group_by(*[c for c in users.c])
.order_by(User.id)
)
q = sess.query(User)
result = q.add_column(s.selected_columns.count).from_statement(s).all()
assert result == expected
def test_raw_columns(self):
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
sess = create_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [
(user7, 1, "Name:jack"),
(user8, 3, "Name:ed"),
(user9, 1, "Name:fred"),
(user10, 0, "Name:chuck"),
]
adalias = addresses.alias()
q = (
create_session()
.query(User)
.add_column(func.count(adalias.c.id))
.add_column(("Name:" + users.c.name))
.outerjoin(adalias, "addresses")
.group_by(users)
.order_by(users.c.id)
)
assert q.all() == expected
# test with a straight statement
s = select(
[
users,
func.count(addresses.c.id).label("count"),
("Name:" + users.c.name).label("concat"),
],
from_obj=[users.outerjoin(addresses)],
group_by=[c for c in users.c],
order_by=[users.c.id],
)
q = create_session().query(User)
result = (
q.add_column(s.selected_columns.count)
.add_column(s.selected_columns.concat)
.from_statement(s)
.all()
)
assert result == expected
sess.expunge_all()
# test with select_entity_from()
q = (
create_session()
.query(User)
.add_column(func.count(addresses.c.id))
.add_column(("Name:" + users.c.name))
.select_entity_from(users.outerjoin(addresses))
.group_by(users)
.order_by(users.c.id)
)
assert q.all() == expected
sess.expunge_all()
q = (
create_session()
.query(User)
.add_column(func.count(addresses.c.id))
.add_column(("Name:" + users.c.name))
.outerjoin("addresses")
.group_by(users)
.order_by(users.c.id)
)
assert q.all() == expected
sess.expunge_all()
q = (
create_session()
.query(User)
.add_column(func.count(adalias.c.id))
.add_column(("Name:" + users.c.name))
.outerjoin(adalias, "addresses")
.group_by(users)
.order_by(users.c.id)
)
assert q.all() == expected
sess.expunge_all()
def test_expression_selectable_matches_mzero(self):
User, Address = self.classes.User, self.classes.Address
ua = aliased(User)
aa = aliased(Address)
s = create_session()
for crit, j, exp in [
(
User.id + Address.id,
User.addresses,
"SELECT users.id + addresses.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id",
),
(
User.id + Address.id,
Address.user,
"SELECT users.id + addresses.id AS anon_1 "
"FROM addresses JOIN users ON users.id = "
"addresses.user_id",
),
(
Address.id + User.id,
User.addresses,
"SELECT addresses.id + users.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id",
),
(
User.id + aa.id,
(aa, User.addresses),
"SELECT users.id + addresses_1.id AS anon_1 "
"FROM users JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id",
),
]:
q = s.query(crit)
mzero = q._entity_zero()
is_(mzero, q._query_entity_zero().entity_zero)
q = q.join(j)
self.assert_compile(q, exp)
for crit, j, exp in [
(
ua.id + Address.id,
ua.addresses,
"SELECT users_1.id + addresses.id AS anon_1 "
"FROM users AS users_1 JOIN addresses "
"ON users_1.id = addresses.user_id",
),
(
ua.id + aa.id,
(aa, ua.addresses),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM users AS users_1 JOIN addresses AS "
"addresses_1 ON users_1.id = addresses_1.user_id",
),
(
ua.id + aa.id,
(ua, aa.user),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM addresses AS addresses_1 JOIN "
"users AS users_1 "
"ON users_1.id = addresses_1.user_id",
),
]:
q = s.query(crit)
mzero = q._entity_zero()
is_(mzero, q._query_entity_zero().entity_zero)
q = q.join(j)
self.assert_compile(q, exp)
def test_aliased_adapt_on_names(self):
User, Address = self.classes.User, self.classes.Address
sess = Session()
agg_address = sess.query(
Address.id,
func.sum(func.length(Address.email_address)).label(
"email_address"
),
).group_by(Address.user_id)
ag1 = aliased(Address, agg_address.subquery())
ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True)
# first, without adapt on names, 'email_address' isn't matched up - we
# get the raw "address" element in the SELECT
self.assert_compile(
sess.query(User, ag1.email_address)
.join(ag1, User.addresses)
.filter(ag1.email_address > 5),
"SELECT users.id "
"AS users_id, users.name AS users_name, addresses.email_address "
"AS addresses_email_address FROM addresses, users JOIN "
"(SELECT addresses.id AS id, sum(length(addresses.email_address)) "
"AS email_address FROM addresses GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id "
"WHERE addresses.email_address > :email_address_1",
)
# second, 'email_address' matches up to the aggregate, and we get a
# smooth JOIN from users->subquery and that's it
self.assert_compile(
sess.query(User, ag2.email_address)
.join(ag2, User.addresses)
.filter(ag2.email_address > 5),
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email_address AS anon_1_email_address FROM users "
"JOIN ("
"SELECT addresses.id AS id, sum(length(addresses.email_address)) "
"AS email_address FROM addresses GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id "
"WHERE anon_1.email_address > :email_address_1",
)
class SelectFromTest(QueryTest, AssertsCompiledSQL):
run_setup_mappers = None
__dialect__ = "default"
def test_replace_with_select(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
sel = users.select(users.c.id.in_([7, 8])).alias()
sess = create_session()
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(id=7), User(id=8)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.filter(User.id == 8)
.all(),
[User(id=8)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.order_by(desc(User.name))
.all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.order_by(asc(User.name))
.all(),
[User(name="ed", id=8), User(name="jack", id=7)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.options(joinedload("addresses"))
.first(),
User(name="jack", addresses=[Address(id=1)]),
)
def test_select_from_aliased_one(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
not_users = table("users", column("id"), column("name"))
ua = aliased(User, select([not_users]).alias(), adapt_on_names=True)
q = sess.query(User.name).select_entity_from(ua).order_by(User.name)
self.assert_compile(
q,
"SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
"users.name AS name FROM users) AS anon_1 ORDER BY anon_1.name",
)
eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])
def test_select_from_aliased_two(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
ua = aliased(User)
q = sess.query(User.name).select_entity_from(ua).order_by(User.name)
self.assert_compile(
q,
"SELECT users_1.name AS users_1_name FROM users AS users_1 "
"ORDER BY users_1.name",
)
eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])
def test_select_from_core_alias_one(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
ua = users.alias()
q = sess.query(User.name).select_entity_from(ua).order_by(User.name)
self.assert_compile(
q,
"SELECT users_1.name AS users_1_name FROM users AS users_1 "
"ORDER BY users_1.name",
)
eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])
def test_differentiate_self_external(self):
"""test some different combinations of joining a table to a subquery of
itself."""
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
sel = sess.query(User).filter(User.id.in_([7, 8])).subquery()
ualias = aliased(User)
self.assert_compile(
sess.query(User).join(sel, User.id > sel.c.id),
"SELECT users.id AS users_id, users.name AS users_name FROM "
"users JOIN (SELECT users.id AS id, users.name AS name FROM users "
"WHERE users.id IN ([POSTCOMPILE_id_1])) "
"AS anon_1 ON users.id > anon_1.id",
)
self.assert_compile(
sess.query(ualias)
.select_entity_from(sel)
.filter(ualias.id > sel.c.id),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1, ("
"SELECT users.id AS id, users.name AS name FROM users "
"WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"WHERE users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
self.assert_compile(
sess.query(ualias)
.select_entity_from(sel)
.join(ualias, ualias.id > sel.c.id),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM (SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
self.assert_compile(
sess.query(ualias)
.select_entity_from(sel)
.join(ualias, ualias.id > User.id),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM (SELECT users.id AS id, users.name AS name FROM "
"users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
salias = aliased(User, sel)
self.assert_compile(
sess.query(salias).join(ualias, ualias.id > salias.id),
"SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
self.assert_compile(
sess.query(ualias).select_entity_from(
join(sel, ualias, ualias.id > sel.c.id)
),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.id "
"IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
def test_aliased_class_vs_nonaliased(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
ua = aliased(User)
sess = create_session()
self.assert_compile(
sess.query(User).select_from(ua).join(User, ua.name > User.name),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users AS users_1 JOIN users ON users_1.name > users.name",
)
self.assert_compile(
sess.query(User.name)
.select_from(ua)
.join(User, ua.name > User.name),
"SELECT users.name AS users_name FROM users AS users_1 "
"JOIN users ON users_1.name > users.name",
)
self.assert_compile(
sess.query(ua.name)
.select_from(ua)
.join(User, ua.name > User.name),
"SELECT users_1.name AS users_1_name FROM users AS users_1 "
"JOIN users ON users_1.name > users.name",
)
self.assert_compile(
sess.query(ua).select_from(User).join(ua, ua.name > User.name),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users JOIN users AS users_1 ON users_1.name > users.name",
)
self.assert_compile(
sess.query(ua).select_from(User).join(ua, User.name > ua.name),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users JOIN users AS users_1 ON users.name > users_1.name",
)
# this is tested in many other places here, just adding it
# here for comparison
self.assert_compile(
sess.query(User.name).select_entity_from(
users.select().where(users.c.id > 5).subquery()
),
"SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
"users.name AS name FROM users WHERE users.id > :id_1) AS anon_1",
)
def test_join_no_order_by(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
eq_(
sess.query(User).select_entity_from(sel.subquery()).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_join_relname_from_selected_from(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
mapper(
User,
users,
properties={
"addresses": relationship(
mapper(Address, addresses), backref="user"
)
},
)
sess = create_session()
self.assert_compile(
sess.query(User).select_from(Address).join("user"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM addresses JOIN users ON users.id = addresses.user_id",
)
def test_filter_by_selected_from(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
mapper(
User,
users,
properties={"addresses": relationship(mapper(Address, addresses))},
)
sess = create_session()
self.assert_compile(
sess.query(User)
.select_from(Address)
.filter_by(email_address="ed")
.join(User),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM addresses JOIN users ON users.id = addresses.user_id "
"WHERE addresses.email_address = :email_address_1",
)
def test_join_ent_selected_from(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
mapper(
User,
users,
properties={"addresses": relationship(mapper(Address, addresses))},
)
sess = create_session()
self.assert_compile(
sess.query(User).select_from(Address).join(User),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM addresses JOIN users ON users.id = addresses.user_id",
)
def test_join(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.join("addresses")
.add_entity(Address)
.order_by(User.id)
.order_by(Address.id)
.all(),
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="jack@bean.com", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@wood.com", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@bettyboop.com", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@lala.com", id=4),
),
],
)
adalias = aliased(Address)
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.join(adalias, "addresses")
.add_entity(adalias)
.order_by(User.id)
.order_by(adalias.id)
.all(),
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="jack@bean.com", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@wood.com", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@bettyboop.com", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@lala.com", id=4),
),
],
)
def test_more_joins(self):
(
users,
Keyword,
orders,
items,
order_items,
Order,
Item,
User,
keywords,
item_keywords,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.orders,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.keywords,
self.tables.item_keywords,
)
mapper(
User,
users,
properties={"orders": relationship(Order, backref="user")},
) # o2m, m2o
mapper(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
)
},
) # m2m
mapper(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords, order_by=keywords.c.id
)
},
) # m2m
mapper(Keyword, keywords)
sess = create_session()
sel = users.select(users.c.id.in_([7, 8]))
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.join("orders", "items", "keywords")
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.join("orders", "items", "keywords", aliased=True)
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
def test_very_nested_joins_with_joinedload(self):
(
users,
Keyword,
orders,
items,
order_items,
Order,
Item,
User,
keywords,
item_keywords,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.orders,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.keywords,
self.tables.item_keywords,
)
mapper(
User,
users,
properties={"orders": relationship(Order, backref="user")},
) # o2m, m2o
mapper(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
)
},
) # m2m
mapper(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords, order_by=keywords.c.id
)
},
) # m2m
mapper(Keyword, keywords)
sess = create_session()
sel = users.select(users.c.id.in_([7, 8]))
def go():
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.options(
joinedload("orders")
.joinedload("items")
.joinedload("keywords")
)
.join("orders", "items", "keywords", aliased=True)
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[
User(
name="jack",
orders=[
Order(
description="order 1",
items=[
Item(
description="item 1",
keywords=[
Keyword(name="red"),
Keyword(name="big"),
Keyword(name="round"),
],
),
Item(
description="item 2",
keywords=[
Keyword(name="red", id=2),
Keyword(name="small", id=5),
Keyword(name="square"),
],
),
Item(
description="item 3",
keywords=[
Keyword(name="green", id=3),
Keyword(name="big", id=4),
Keyword(name="round", id=6),
],
),
],
),
Order(
description="order 3",
items=[
Item(
description="item 3",
keywords=[
Keyword(name="green", id=3),
Keyword(name="big", id=4),
Keyword(name="round", id=6),
],
),
Item(
description="item 4", keywords=[], id=4
),
Item(
description="item 5", keywords=[], id=5
),
],
),
Order(
description="order 5",
items=[
Item(description="item 5", keywords=[])
],
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
sel2 = orders.select(orders.c.id.in_([1, 2, 3]))
eq_(
sess.query(Order)
.select_entity_from(sel2.subquery())
.join("items", "keywords")
.filter(Keyword.name == "red")
.order_by(Order.id)
.all(),
[
Order(description="order 1", id=1),
Order(description="order 2", id=2),
],
)
eq_(
sess.query(Order)
.select_entity_from(sel2.subquery())
.join("items", "keywords", aliased=True)
.filter(Keyword.name == "red")
.order_by(Order.id)
.all(),
[
Order(description="order 1", id=1),
Order(description="order 2", id=2),
],
)
def test_replace_with_eager(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties={
"addresses": relationship(Address, order_by=addresses.c.id)
},
)
mapper(Address, addresses)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
def go():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel.subquery())
.order_by(User.id)
.all(),
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel.subquery())
.filter(User.id == 8)
.order_by(User.id)
.all(),
[
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel.subquery())
.order_by(User.id)[1],
User(
id=8,
addresses=[Address(id=2), Address(id=3), Address(id=4)],
),
)
self.assert_sql_count(testing.db, go, 1)
class CustomJoinTest(QueryTest):
run_setup_mappers = None
def test_double_same_mappers(self):
"""test aliasing of joins with a custom join condition"""
(
addresses,
items,
order_items,
orders,
Item,
User,
Address,
Order,
users,
) = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
mapper(Address, addresses)
mapper(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="select",
order_by=items.c.id,
)
},
)
mapper(Item, items)
mapper(
User,
users,
properties=dict(
addresses=relationship(Address, lazy="select"),
open_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 1, users.c.id == orders.c.user_id
),
lazy="select",
),
closed_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 0, users.c.id == orders.c.user_id
),
lazy="select",
),
),
)
q = create_session().query(User)
eq_(
q.join("open_orders", "items", aliased=True)
.filter(Item.id == 4)
.join("closed_orders", "items", aliased=True)
.filter(Item.id == 3)
.all(),
[User(id=7)],
)
class ExternalColumnsTest(QueryTest):
"""test mappers with SQL-expressions added as column properties."""
run_setup_mappers = None
def test_external_columns_bad(self):
users, User = self.tables.users, self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
"not represented in the mapper's table",
mapper,
User,
users,
properties={"concat": (users.c.id * 2)},
)
clear_mappers()
def test_external_columns(self):
"""test querying mappings that reference external columns or
selectables."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties={
"concat": column_property((users.c.id * 2)),
"count": column_property(
select(
[func.count(addresses.c.id)],
users.c.id == addresses.c.user_id,
)
.correlate(users)
.scalar_subquery()
),
},
)
mapper(Address, addresses, properties={"user": relationship(User)})
sess = create_session()
sess.query(Address).options(joinedload("user")).all()
eq_(
sess.query(User).all(),
[
User(id=7, concat=14, count=1),
User(id=8, concat=16, count=3),
User(id=9, concat=18, count=1),
User(id=10, concat=20, count=0),
],
)
address_result = [
Address(id=1, user=User(id=7, concat=14, count=1)),
Address(id=2, user=User(id=8, concat=16, count=3)),
Address(id=3, user=User(id=8, concat=16, count=3)),
Address(id=4, user=User(id=8, concat=16, count=3)),
Address(id=5, user=User(id=9, concat=18, count=1)),
]
eq_(sess.query(Address).all(), address_result)
# run the eager version twice to test caching of aliased clauses
for x in range(2):
sess.expunge_all()
def go():
eq_(
sess.query(Address)
.options(joinedload("user"))
.order_by(Address.id)
.all(),
address_result,
)
self.assert_sql_count(testing.db, go, 1)
ualias = aliased(User)
eq_(
sess.query(Address, ualias).join(ualias, "user").all(),
[(address, address.user) for address in address_result],
)
eq_(
sess.query(Address, ualias.count)
.join(ualias, "user")
.join("user", aliased=True)
.order_by(Address.id)
.all(),
[
(Address(id=1), 1),
(Address(id=2), 3),
(Address(id=3), 3),
(Address(id=4), 3),
(Address(id=5), 1),
],
)
eq_(
sess.query(Address, ualias.concat, ualias.count)
.join(ualias, "user")
.join("user", aliased=True)
.order_by(Address.id)
.all(),
[
(Address(id=1), 14, 1),
(Address(id=2), 16, 3),
(Address(id=3), 16, 3),
(Address(id=4), 16, 3),
(Address(id=5), 18, 1),
],
)
ua = aliased(User)
eq_(
sess.query(Address, ua.concat, ua.count)
.select_entity_from(join(Address, ua, "user"))
.options(joinedload(Address.user))
.order_by(Address.id)
.all(),
[
(Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1),
(Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3),
(Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3),
(Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3),
(Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1),
],
)
eq_(
list(
sess.query(Address)
.join("user")
.values(Address.id, User.id, User.concat, User.count)
),
[
(1, 7, 14, 1),
(2, 8, 16, 3),
(3, 8, 16, 3),
(4, 8, 16, 3),
(5, 9, 18, 1),
],
)
eq_(
list(
sess.query(Address, ua)
.select_entity_from(join(Address, ua, "user"))
.values(Address.id, ua.id, ua.concat, ua.count)
),
[
(1, 7, 14, 1),
(2, 8, 16, 3),
(3, 8, 16, 3),
(4, 8, 16, 3),
(5, 9, 18, 1),
],
)
def test_external_columns_joinedload(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
        # in this test, we have a subquery on User that accesses "addresses",
        # underneath a joinedload for "addresses". So the "addresses" alias
        # adapter needs to *not* hit the "addresses" table within the "user"
        # subquery, but "user" still needs to be adapted. Therefore the
        # long-standing practice of eager adapters being "chained" has been
        # removed, since it's unnecessary and breaks this exact condition.
mapper(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", order_by=addresses.c.id
),
"concat": column_property((users.c.id * 2)),
"count": column_property(
select(
[func.count(addresses.c.id)],
users.c.id == addresses.c.user_id,
)
.correlate(users)
.scalar_subquery()
),
},
)
mapper(Address, addresses)
mapper(
Order, orders, properties={"address": relationship(Address)}
) # m2o
sess = create_session()
def go():
o1 = (
sess.query(Order)
.options(joinedload("address").joinedload("user"))
.get(1)
)
eq_(o1.address.user.count, 1)
self.assert_sql_count(testing.db, go, 1)
sess = create_session()
def go():
o1 = (
sess.query(Order)
.options(joinedload("address").joinedload("user"))
.first()
)
eq_(o1.address.user.count, 1)
self.assert_sql_count(testing.db, go, 1)
def test_external_columns_compound(self):
# see [ticket:2167] for background
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties={"fullname": column_property(users.c.name.label("x"))},
)
mapper(
Address,
addresses,
properties={
"username": column_property(
select([User.fullname])
.where(User.id == addresses.c.user_id)
.label("y")
)
},
)
sess = create_session()
a1 = sess.query(Address).first()
eq_(a1.username, "jack")
sess = create_session()
a1 = sess.query(Address).from_self().first()
eq_(a1.username, "jack")
class TestOverlyEagerEquivalentCols(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"base",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
Table(
"sub1",
metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
Column("data", String(50)),
)
Table(
"sub2",
metadata,
Column(
"id",
Integer,
ForeignKey("base.id"),
ForeignKey("sub1.id"),
primary_key=True,
),
Column("data", String(50)),
)
def test_equivs(self):
base, sub2, sub1 = (
self.tables.base,
self.tables.sub2,
self.tables.sub1,
)
class Base(fixtures.ComparableEntity):
pass
class Sub1(fixtures.ComparableEntity):
pass
class Sub2(fixtures.ComparableEntity):
pass
mapper(
Base,
base,
properties={
"sub1": relationship(Sub1),
"sub2": relationship(Sub2),
},
)
mapper(Sub1, sub1)
mapper(Sub2, sub2)
sess = create_session()
s11 = Sub1(data="s11")
s12 = Sub1(data="s12")
s2 = Sub2(data="s2")
b1 = Base(data="b1", sub1=[s11], sub2=[])
b2 = Base(data="b1", sub1=[s12], sub2=[])
sess.add(b1)
sess.add(b2)
sess.flush()
        # there's an overlapping ForeignKey here, so there's not much option
        # except to artificially control the flush order
b2.sub2 = [s2]
sess.flush()
q = sess.query(Base).outerjoin("sub2", aliased=True)
assert sub1.c.id not in q._filter_aliases[0].equivalents
eq_(
sess.query(Base)
.join("sub1")
.outerjoin("sub2", aliased=True)
.filter(Sub1.id == 1)
.one(),
b1,
)
class LabelCollideTest(fixtures.MappedTest):
"""Test handling for a label collision. This collision
is handled by core, see ticket:2702 as well as
    test/sql/test_selectable->WithLabelsTest. Here we want
to make sure the end result is as we expect.
"""
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("bar_id", Integer),
)
Table("foo_bar", metadata, Column("id", Integer, primary_key=True))
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
class Bar(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
mapper(cls.classes.Foo, cls.tables.foo)
mapper(cls.classes.Bar, cls.tables.foo_bar)
@classmethod
def insert_data(cls):
s = Session()
s.add_all([cls.classes.Foo(id=1, bar_id=2), cls.classes.Bar(id=3)])
s.commit()
def test_overlap_plain(self):
s = Session()
row = s.query(self.classes.Foo, self.classes.Bar).all()[0]
def go():
eq_(row.Foo.id, 1)
eq_(row.Foo.bar_id, 2)
eq_(row.Bar.id, 3)
# all three columns are loaded independently without
# overlap, no additional SQL to load all attributes
self.assert_sql_count(testing.db, go, 0)
def test_overlap_subquery(self):
s = Session()
row = s.query(self.classes.Foo, self.classes.Bar).from_self().all()[0]
def go():
eq_(row.Foo.id, 1)
eq_(row.Foo.bar_id, 2)
eq_(row.Bar.id, 3)
# all three columns are loaded independently without
# overlap, no additional SQL to load all attributes
self.assert_sql_count(testing.db, go, 0)
| 31.979512
| 79
| 0.468559
|
a0f536df8662e21fa2b830dc2f08d2252a0fa37e
| 5,691
|
py
|
Python
|
filesdb/read_bigquery.py
|
remram44/filesdb
|
3f8a6f3edbb0763489a563f6165ffb4f2d26362d
|
[
"MIT"
] | 3
|
2019-03-16T03:16:13.000Z
|
2020-07-13T01:53:19.000Z
|
filesdb/read_bigquery.py
|
ViDA-NYU/filesdb
|
3f8a6f3edbb0763489a563f6165ffb4f2d26362d
|
[
"MIT"
] | 8
|
2019-01-03T17:16:03.000Z
|
2019-01-04T20:06:31.000Z
|
filesdb/read_bigquery.py
|
VIDA-NYU/filesdb
|
8d978feba1f9040d736c60250be971e807d0aca5
|
[
"MIT"
] | null | null | null |
"""Get projects, versions, and downloads from BigQuery data.
The most reliable source of information for PyPI is now on Google BigQuery. It
is the only way to get recent updates, bulk data, and some fields like download
counts.
This script allows you to import a CSV exported from BigQuery into the
database.
Use this SQL query on BigQuery:
SELECT
name, version,
upload_time, filename, size,
path,
python_version, packagetype,
md5_digest, sha256_digest
FROM `the-psf.pypi.distribution_metadata`
WHERE upload_time > :last_upload_time
ORDER BY upload_time ASC
"""
import csv
from datetime import datetime
import logging
import os
import re
import sys
from . import database
from .utils import normalize_project_name
logger = logging.getLogger('filesdb.read_bigquery')
class BatchInserter(object):
BATCH_SIZE = 500
def __init__(self, db, query, dependencies=()):
self.db = db
self.query = query
self.values = []
self.dependencies = dependencies
def insert(self, **kwargs):
self.values.append(kwargs)
if len(self.values) > self.BATCH_SIZE:
values = self.values[:self.BATCH_SIZE]
self.values = self.values[self.BATCH_SIZE:]
for dep in self.dependencies:
dep.flush()
self.db.execute(self.query.values(values))
def flush(self):
if self.values:
for dep in self.dependencies:
dep.flush()
self.db.execute(self.query.values(self.values))
self.values = []
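# Hedged usage sketch (illustrative only; `db` and the insert query come from
# this module's `database` helpers, exactly as used in read_data below):
#
#     projects = BatchInserter(db, database.insert_or_ignore(database.projects))
#     projects.insert(name="example-project")
#     projects.flush()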
def main():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
if len(sys.argv) == 3 and sys.argv[1] == 'csv':
filename = sys.argv[2]
if not os.path.isfile(sys.argv[2]):
print("Usage: read_bigquery.py <exported-table.csv>",
file=sys.stderr)
sys.exit(2)
with open(filename, 'r') as fp:
total_rows = sum(1 for _ in fp) - 1
with open(filename, 'r') as fp:
reader = csv.DictReader(fp)
header = reader.fieldnames
assert header == [
'name', 'version',
'upload_time', 'filename', 'size',
'path',
'python_version', 'packagetype',
'md5_digest', 'sha256_digest',
]
read_data(reader, total_rows)
elif (
len(sys.argv) == 3
and sys.argv[1] == 'query'
and os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
):
from_time = datetime.fromisoformat(sys.argv[2])
from google.cloud import bigquery
client = bigquery.Client()
query = '''\
SELECT
name, version,
upload_time, filename, size,
path,
python_version, packagetype,
md5_digest, sha256_digest
FROM `the-psf.pypi.distribution_metadata`
WHERE upload_time > "{time}"
ORDER BY upload_time ASC
'''.format(
time=from_time.strftime('%Y-%m-%d %H:%M:%S')
)
job = client.query(query)
total_rows = sum(1 for _ in job.result())
iterator = job.result()
read_data(iterator, total_rows)
else:
print(
"Usage:\n read_bigquery.py csv <exported-table.csv>\n"
+ " GOOGLE_APPLICATION_CREDENTIALS=account.json "
+ "read_bigquery.py query <isodate>",
file=sys.stderr,
)
sys.exit(2)
def read_data(iterator, total_rows):
with database.connect() as db:
projects = BatchInserter(
db,
database.insert_or_ignore(database.projects),
)
versions = BatchInserter(
db,
database.insert_or_ignore(database.project_versions),
[projects],
)
downloads = BatchInserter(
db,
database.insert_or_ignore(database.downloads),
[projects],
)
for i, row in enumerate(iterator):
if i % 10000 == 0:
logger.info("%d / %d", i, total_rows)
if row['path']:
url = 'https://files.pythonhosted.org/packages/' + row['path']
else:
url = None
timestamp = row['upload_time']
# datetime if coming from BigQuery, str if coming from CSV
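            # e.g. "2021-03-04 05:06:07.89 UTC" -> "2021-03-04T05:06:07" (illustrative)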
if not isinstance(timestamp, datetime):
timestamp = re.sub(
r'^(20[0-9][0-9]-[0-9][0-9]-[0-9][0-9]) ([0-9][0-9]:[0-9][0-9]:[0-9][0-9])(?:\.[0-9]*)? UTC$',
r'\1T\2',
timestamp,
)
timestamp = datetime.fromisoformat(timestamp)
name = normalize_project_name(row['name'])
projects.insert(
name=name,
)
versions.insert(
project_name=name,
version=row['version'],
)
downloads.insert(
project_name=name,
project_version=row['version'],
name=row['filename'],
size_bytes=int(row['size']),
upload_time=timestamp,
url=url,
type=row['packagetype'],
python_version=row['python_version'],
hash_md5=row['md5_digest'],
hash_sha256=row['sha256_digest'],
)
projects.flush()
versions.flush()
downloads.flush()
if __name__ == '__main__':
main()
| 29.184615
| 114
| 0.537867
|
8dfd8a5070ec05a32a6e15feb9a3235428cbaf13
| 2,846
|
py
|
Python
|
venv/lib/python3.7/site-packages/nltk/corpus/reader/nps_chat.py
|
VighneshHarihar/Newsify
|
321f5f65bb6983c0ca5a3864900b27ce36a32717
|
[
"MIT"
] | 10
|
2021-05-31T07:18:08.000Z
|
2022-03-19T09:20:11.000Z
|
venv/lib/python3.7/site-packages/nltk/corpus/reader/nps_chat.py
|
VighneshHarihar/Newsify
|
321f5f65bb6983c0ca5a3864900b27ce36a32717
|
[
"MIT"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
venv/lib/python3.7/site-packages/nltk/corpus/reader/nps_chat.py
|
VighneshHarihar/Newsify
|
321f5f65bb6983c0ca5a3864900b27ce36a32717
|
[
"MIT"
] | 7
|
2015-09-30T03:00:44.000Z
|
2021-06-04T05:34:39.000Z
|
# Natural Language Toolkit: NPS Chat Corpus Reader
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import re
import textwrap
from nltk.util import LazyConcatenation
from nltk.internals import ElementWrapper
from nltk.tag import map_tag
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
from nltk.corpus.reader.xmldocs import *
class NPSChatCorpusReader(XMLCorpusReader):
def __init__(self, root, fileids, wrap_etree=False, tagset=None):
XMLCorpusReader.__init__(self, root, fileids, wrap_etree)
self._tagset = tagset
def xml_posts(self, fileids=None):
if self._wrap_etree:
return concat(
[
XMLCorpusView(fileid, "Session/Posts/Post", self._wrap_elt)
for fileid in self.abspaths(fileids)
]
)
else:
return concat(
[
XMLCorpusView(fileid, "Session/Posts/Post")
for fileid in self.abspaths(fileids)
]
)
def posts(self, fileids=None):
return concat(
[
XMLCorpusView(
fileid, "Session/Posts/Post/terminals", self._elt_to_words
)
for fileid in self.abspaths(fileids)
]
)
def tagged_posts(self, fileids=None, tagset=None):
def reader(elt, handler):
return self._elt_to_tagged_words(elt, handler, tagset)
return concat(
[
XMLCorpusView(fileid, "Session/Posts/Post/terminals", reader)
for fileid in self.abspaths(fileids)
]
)
def words(self, fileids=None):
return LazyConcatenation(self.posts(fileids))
def tagged_words(self, fileids=None, tagset=None):
return LazyConcatenation(self.tagged_posts(fileids, tagset))
def _wrap_elt(self, elt, handler):
return ElementWrapper(elt)
def _elt_to_words(self, elt, handler):
return [self._simplify_username(t.attrib["word"]) for t in elt.findall("t")]
def _elt_to_tagged_words(self, elt, handler, tagset=None):
tagged_post = [
(self._simplify_username(t.attrib["word"]), t.attrib["pos"])
for t in elt.findall("t")
]
if tagset and tagset != self._tagset:
tagged_post = [
(w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_post
]
return tagged_post
@staticmethod
def _simplify_username(word):
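        # e.g. "10-19-20sUser7" -> "U7" (illustrative example)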
if "User" in word:
word = "U" + word.split("User", 1)[1]
elif isinstance(word, bytes):
word = word.decode("ascii")
return word
| 30.934783
| 84
| 0.590654
|
1ca6d6eecf4e8b46a67201b0f04f0f26600ff7b9
| 2,413
|
py
|
Python
|
gameOfLife/Models/gridItem.py
|
Jack-Dane/Game-of-Life
|
ee3387e56f022f1d3cc84699635f68a4015f61fa
|
[
"MIT"
] | null | null | null |
gameOfLife/Models/gridItem.py
|
Jack-Dane/Game-of-Life
|
ee3387e56f022f1d3cc84699635f68a4015f61fa
|
[
"MIT"
] | null | null | null |
gameOfLife/Models/gridItem.py
|
Jack-Dane/Game-of-Life
|
ee3387e56f022f1d3cc84699635f68a4015f61fa
|
[
"MIT"
] | null | null | null |
from gameOfLife.Observers.subject import Subject
class GridItem(Subject):
def __init__(self, x, y):
super(GridItem, self).__init__()
self.x = x
self.y = y
self.active = False
self.nextIteration = False
def __str__(self):
return "1" if self.active else "0"
@property
def color(self):
"""
What color the gridItem should be
:return: A tuple RGB value
"""
if self.active:
return 170, 170, 170
return 0, 0, 0
def markActive(self):
self.active = True
def markInactive(self, nextIteration=False):
"""
Mark the gridItem as inactive
:param nextIteration: If True will set nextIteration to inactive as well
"""
self.active = False
if nextIteration:
self.nextIteration = False
def toggleActive(self):
self.active = not self.active
def getData(self):
return self.active
def shouldChange(self, totalSurrounding):
"""
        Based on the number of surrounding active tiles, decide whether this tile should
1. Die if it is alive
2. Reproduce if it is dead
3. Stay alive
:param totalSurrounding: Number of active surrounding tiles
"""
if self.active:
self.shouldDie(totalSurrounding)
else:
self.shouldReproduce(totalSurrounding)
def shouldDie(self, totalSurrounding):
"""
This function is called on an active gridItem to determine if it should die
:param totalSurrounding: Number of active surrounding tiles
"""
if totalSurrounding not in range(2, 4):
self.nextIteration = False
else:
self.nextIteration = True
def shouldReproduce(self, totalSurrounding):
"""
This function is called on an inactive gridItem to determine if it should come
to life in the next iteration
:param totalSurrounding: Number of active surrounding tiles
"""
if totalSurrounding == 3:
self.nextIteration = True
else:
self.nextIteration = False
def update(self):
"""
Make the nextIteration value equal to the current active value
"""
self.active = self.nextIteration
def checkSame(self):
return self.active == self.nextIteration
| 27.735632
| 86
| 0.599254
|
74f2f92413bcc01ee8210ec1fb670b69f37c235a
| 1,248
|
py
|
Python
|
experiments/2014-03-21-interp-GPSS-orig.py
|
jaesikchoi/gpss-research
|
2a64958a018f1668f7b8eedf33c4076a63af7868
|
[
"MIT"
] | 151
|
2015-01-09T19:25:05.000Z
|
2022-01-05T02:05:52.000Z
|
experiments/2014-03-21-interp-GPSS-orig.py
|
jaesikchoi/gpss-research
|
2a64958a018f1668f7b8eedf33c4076a63af7868
|
[
"MIT"
] | 1
|
2016-08-04T13:12:51.000Z
|
2016-08-04T13:12:51.000Z
|
experiments/2014-03-21-interp-GPSS-orig.py
|
jaesikchoi/gpss-research
|
2a64958a018f1668f7b8eedf33c4076a63af7868
|
[
"MIT"
] | 59
|
2015-02-04T19:13:58.000Z
|
2021-07-28T23:36:09.000Z
|
Experiment(description='GPSS interpolation experiment',
data_dir='../data/tsdlr_5050/',
max_depth=10,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=400,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../results/2014-03-21-GPSS-orig/',
iters=250,
base_kernels='SE,LinOLD,PerOLD,RQ',
random_seed=1,
period_heuristic=3,
max_period_heuristic=5,
period_heuristic_type='min',
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=False,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoneKernel()', # Starting kernel
lik='ff.LikGauss()', # Starting likelihood
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', 'B', {'A': 'kernel', 'B': 'base'})])
| 36.705882
| 92
| 0.471154
|
a48f2194126b073f9303517415078f5ca13f96ad
| 101
|
py
|
Python
|
src/pmt_hostel_app/apps.py
|
hygull/p-host
|
d310479ce4ca9946bf0bff43fbb527c36c18728b
|
[
"MIT"
] | null | null | null |
src/pmt_hostel_app/apps.py
|
hygull/p-host
|
d310479ce4ca9946bf0bff43fbb527c36c18728b
|
[
"MIT"
] | null | null | null |
src/pmt_hostel_app/apps.py
|
hygull/p-host
|
d310479ce4ca9946bf0bff43fbb527c36c18728b
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class PmtHostelAppConfig(AppConfig):
name = 'pmt_hostel_app'
| 16.833333
| 36
| 0.782178
|
a0d9b51f6437303123930ebe4a7fe49eec274d36
| 3,438
|
py
|
Python
|
python/src/od_lib/definitions/path_definitions.py
|
open-discourse/open-discourse
|
cf7001e0961faa708c50f1415f37fa4a777ef83c
|
[
"MIT"
] | 45
|
2020-12-19T17:34:24.000Z
|
2021-12-11T11:15:28.000Z
|
python/src/od_lib/definitions/path_definitions.py
|
open-discourse/open-discourse
|
cf7001e0961faa708c50f1415f37fa4a777ef83c
|
[
"MIT"
] | 28
|
2020-12-21T09:37:29.000Z
|
2022-02-17T20:58:30.000Z
|
python/src/od_lib/definitions/path_definitions.py
|
open-discourse/open-discourse
|
cf7001e0961faa708c50f1415f37fa4a777ef83c
|
[
"MIT"
] | 2
|
2021-01-17T21:56:54.000Z
|
2021-02-07T17:15:51.000Z
|
import os
# ROOT DIR _________________________________________________________________________________________
ROOT_DIR = os.path.abspath(os.path.join(__file__, "../../../.."))
# DATA _____________________________________________________________________________________________
DATA = os.path.join(ROOT_DIR, "data")
DATA_RAW = os.path.join(DATA, "01_raw")
DATA_CACHE = os.path.join(DATA, "02_cached")
DATA_FINAL = os.path.join(DATA, "03_final")
FINAL = os.path.join(DATA, "03_final")
# MP_BASE_DATA ___________________________________________________________________________________
MP_BASE_DATA = os.path.join(DATA_RAW, "MP_BASE_DATA", "MDB_STAMMDATEN.XML")
# RAW ______________________________________________________________________________________________
RAW_ZIP = os.path.join(DATA_RAW, "zip")
RAW_XML = os.path.join(DATA_RAW, "xml")
RAW_TXT = os.path.join(DATA_RAW, "txt")
# SPEECH CONTENT ___________________________________________________________________________________
SPEECH_CONTENT = os.path.join(DATA_CACHE, "speech_content")
SPEECH_CONTENT_STAGE_01 = os.path.join(SPEECH_CONTENT, "stage_01")
SPEECH_CONTENT_STAGE_02 = os.path.join(SPEECH_CONTENT, "stage_02")
SPEECH_CONTENT_STAGE_03 = os.path.join(SPEECH_CONTENT, "stage_03")
SPEECH_CONTENT_STAGE_04 = os.path.join(SPEECH_CONTENT, "stage_04")
SPEECH_CONTENT_FINAL = os.path.join(SPEECH_CONTENT, "final")
# CONTRIBUTIONS_EXTENDED ___________________________________________________________________________
CONTRIBUTIONS_EXTENDED = os.path.join(DATA_CACHE, "contributions_extended")
CONTRIBUTIONS_EXTENDED_STAGE_01 = os.path.join(CONTRIBUTIONS_EXTENDED, "stage_01")
CONTRIBUTIONS_EXTENDED_STAGE_02 = os.path.join(CONTRIBUTIONS_EXTENDED, "stage_02")
CONTRIBUTIONS_EXTENDED_STAGE_03 = os.path.join(CONTRIBUTIONS_EXTENDED, "stage_03")
CONTRIBUTIONS_EXTENDED_STAGE_04 = os.path.join(CONTRIBUTIONS_EXTENDED, "stage_04")
CONTRIBUTIONS_EXTENDED_FINAL = os.path.join(CONTRIBUTIONS_EXTENDED, "final")
# POLITICIANS ______________________________________________________________________________________
POLITICIANS = os.path.join(DATA_CACHE, "politicians")
POLITICIANS_STAGE_01 = os.path.join(POLITICIANS, "stage_01")
POLITICIANS_STAGE_02 = os.path.join(POLITICIANS, "stage_02")
POLITICIANS_FINAL = os.path.join(POLITICIANS, "final")
# FACTIONS _________________________________________________________________________________________
FACTIONS = os.path.join(DATA_CACHE, "factions")
FACTIONS_STAGE_01 = os.path.join(FACTIONS, "stage_01")
FACTIONS_STAGE_02 = os.path.join(FACTIONS, "stage_02")
FACTIONS_FINAL = os.path.join(FACTIONS, "final")
# CONTRIBUTIONS_SIMPLIFIED _________________________________________________________________________
CONTRIBUTIONS_SIMPLIFIED = os.path.join(FINAL)
# ELECTORAL_TERMS __________________________________________________________________________________
ELECTORAL_TERMS = os.path.join(FINAL)
# ELECTORAL_TERM_19 ________________________________________________________________________________
ELECTORAL_TERM_19 = os.path.join(DATA_CACHE, "electoral_term_19")
ELECTORAL_TERM_19_STAGE_01 = os.path.join(ELECTORAL_TERM_19, "stage_01")
ELECTORAL_TERM_19_STAGE_02 = os.path.join(ELECTORAL_TERM_19, "stage_02")
ELECTORAL_TERM_19_STAGE_03 = os.path.join(ELECTORAL_TERM_19, "stage_03")
# TOPIC_MODELLING __________________________________________________________________________________
TOPIC_MODELLING = os.path.join(DATA_CACHE, "topic_modelling")
| 52.090909
| 100
| 0.847877
|
b1997b630cddae6676f12dae6b45c2c3d5226a5f
| 39
|
py
|
Python
|
BluePrint/manager.py
|
CodeMath/jinrockets
|
6bb26e9ca66ba951ab2d34bf1ffe79b2c605963f
|
[
"MIT"
] | null | null | null |
BluePrint/manager.py
|
CodeMath/jinrockets
|
6bb26e9ca66ba951ab2d34bf1ffe79b2c605963f
|
[
"MIT"
] | null | null | null |
BluePrint/manager.py
|
CodeMath/jinrockets
|
6bb26e9ca66ba951ab2d34bf1ffe79b2c605963f
|
[
"MIT"
] | null | null | null |
from apps import manager
manager.run()
| 13
| 24
| 0.794872
|
ad4e25809d75346cd77fdcea10e80b42f8ac0bdb
| 1,568
|
py
|
Python
|
demos/space_invaders/space_invaders.py
|
PawseySC/Using-Python-in-HPC
|
8d4de3d2b921dd15ed8800f31d3633d431920f00
|
[
"CC0-1.0"
] | 1
|
2020-11-30T17:40:39.000Z
|
2020-11-30T17:40:39.000Z
|
demos/space_invaders/space_invaders.py
|
PawseySC/Using-Python-in-HPC
|
8d4de3d2b921dd15ed8800f31d3633d431920f00
|
[
"CC0-1.0"
] | null | null | null |
demos/space_invaders/space_invaders.py
|
PawseySC/Using-Python-in-HPC
|
8d4de3d2b921dd15ed8800f31d3633d431920f00
|
[
"CC0-1.0"
] | 1
|
2021-09-02T15:23:45.000Z
|
2021-09-02T15:23:45.000Z
|
#!/usr/bin/env python
import math, time
import tkinter as tk
from tkinter import Tk, Canvas, PhotoImage, Label, Button
class Game(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
parent.title("Space Invaders")
canvas, aliens, lasers = Canvas(parent, width=800, height=400, bg='black'), {}, {}
canvas.pack()
i1, i2 = PhotoImage(format = 'gif', file = "alien.gif"), PhotoImage(format = 'gif', file = "laser.gif")
for x, y, p in [(100+40*j, 160-20*i, 100*i) for i in range(8) for j in range(15)]:
aliens[canvas.create_image(x, y, image = i1)] = p
canvas.bind('<Button-1>', lambda e: lasers.update({canvas.create_image(e.x, 390, image=i2): 10}))
while aliens:
try:
for l in lasers:
canvas.move(l, 0, -5)
if canvas.coords(l)[1]<0:
canvas.delete(l); del lasers[l]
for a in aliens:
canvas.move(a, 2.0*math.sin(time.time()),0)
p = canvas.coords(a)
items = canvas.find_overlapping(p[0]-5, p[1]-5, p[0]+5, p[1]+5)
for i in items[1:2]:
canvas.delete(a); del aliens[a]; canvas.delete(i); del lasers[i]
time.sleep(0.02); root.update()
except: pass
if __name__ == "__main__":
root = tk.Tk()
Game(root).pack(side="top", fill="both", expand=True)
root.mainloop()
| 41.263158
| 111
| 0.531888
|
bfe2625d0ee37e600a9a90424b297c89f1250337
| 226
|
py
|
Python
|
LeetCode/python3/169.py
|
ZintrulCre/LeetCode_Archiver
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 279
|
2019-02-19T16:00:32.000Z
|
2022-03-23T12:16:30.000Z
|
LeetCode/python3/169.py
|
ZintrulCre/LeetCode_Archiver
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 2
|
2019-03-31T08:03:06.000Z
|
2021-03-07T04:54:32.000Z
|
LeetCode/python3/169.py
|
ZintrulCre/LeetCode_Crawler
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 12
|
2019-01-29T11:45:32.000Z
|
2019-02-04T16:31:46.000Z
|
import collections
class Solution:
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
d = collections.Counter(nums)
return max(d.keys(), key=d.get)
| 18.833333
| 39
| 0.544248
|
eeae0f256764eb009eec857f71eaeee5796d74ea
| 5,041
|
py
|
Python
|
devserver/modules/profile.py
|
leture/django-devserver
|
e6c882fc11fba013b85d37f04db8f11b531eda9a
|
[
"BSD-3-Clause"
] | 467
|
2015-01-02T07:06:13.000Z
|
2022-03-29T14:40:03.000Z
|
devserver/modules/profile.py
|
leture/django-devserver
|
e6c882fc11fba013b85d37f04db8f11b531eda9a
|
[
"BSD-3-Clause"
] | 33
|
2015-01-28T13:08:16.000Z
|
2021-03-08T11:12:50.000Z
|
devserver/modules/profile.py
|
leture/django-devserver
|
e6c882fc11fba013b85d37f04db8f11b531eda9a
|
[
"BSD-3-Clause"
] | 103
|
2015-01-12T07:13:58.000Z
|
2022-02-10T05:00:07.000Z
|
from devserver.modules import DevServerModule
from devserver.utils.time import ms_from_timedelta
from devserver.settings import DEVSERVER_AUTO_PROFILE
from datetime import datetime
import functools
import gc
class ProfileSummaryModule(DevServerModule):
"""
Outputs a summary of cache events once a response is ready.
"""
logger_name = 'profile'
def process_init(self, request):
self.start = datetime.now()
def process_complete(self, request):
duration = datetime.now() - self.start
self.logger.info('Total time to render was %.2fs', ms_from_timedelta(duration) / 1000)
class LeftOversModule(DevServerModule):
"""
Outputs a summary of events the garbage collector couldn't handle.
"""
    # TODO: Not even sure this is correct, but it's the general idea
logger_name = 'profile'
def process_init(self, request):
gc.enable()
gc.set_debug(gc.DEBUG_SAVEALL)
def process_complete(self, request):
gc.collect()
self.logger.info('%s objects left in garbage', len(gc.garbage))
from django.template.defaultfilters import filesizeformat
try:
from guppy import hpy
except ImportError:
import warnings
class MemoryUseModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('MemoryUseModule requires guppy to be installed.')
return super(MemoryUseModule, cls).__new__(cls)
else:
class MemoryUseModule(DevServerModule):
"""
        Outputs a summary of memory usage over the course of a request.
"""
logger_name = 'profile'
def __init__(self, request):
super(MemoryUseModule, self).__init__(request)
self.hpy = hpy()
self.oldh = self.hpy.heap()
self.logger.info('heap size is %s', filesizeformat(self.oldh.size))
def process_complete(self, request):
newh = self.hpy.heap()
alloch = newh - self.oldh
dealloch = self.oldh - newh
self.oldh = newh
self.logger.info('%s allocated, %s deallocated, heap size is %s', *map(filesizeformat, [alloch.size, dealloch.size, newh.size]))
try:
from line_profiler import LineProfiler
except ImportError:
import warnings
class LineProfilerModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('LineProfilerModule requires line_profiler to be installed.')
return super(LineProfilerModule, cls).__new__(cls)
class devserver_profile(object):
def __init__(self, follow=[]):
pass
def __call__(self, func):
return func
else:
class LineProfilerModule(DevServerModule):
"""
Outputs a Line by Line profile of any @devserver_profile'd functions that were run
"""
logger_name = 'profile'
def process_view(self, request, view_func, view_args, view_kwargs):
request.devserver_profiler = LineProfiler()
request.devserver_profiler_run = False
if (DEVSERVER_AUTO_PROFILE):
_unwrap_closure_and_profile(request.devserver_profiler, view_func)
request.devserver_profiler.enable_by_count()
def process_complete(self, request):
if hasattr(request, 'devserver_profiler_run') and (DEVSERVER_AUTO_PROFILE or request.devserver_profiler_run):
from cStringIO import StringIO
out = StringIO()
if (DEVSERVER_AUTO_PROFILE):
request.devserver_profiler.disable_by_count()
request.devserver_profiler.print_stats(stream=out)
self.logger.info(out.getvalue())
def _unwrap_closure_and_profile(profiler, func):
if not hasattr(func, 'func_code'):
return
profiler.add_function(func)
if func.func_closure:
for cell in func.func_closure:
if hasattr(cell.cell_contents, 'func_code'):
_unwrap_closure_and_profile(profiler, cell.cell_contents)
class devserver_profile(object):
def __init__(self, follow=[]):
self.follow = follow
def __call__(self, func):
def profiled_func(*args, **kwargs):
request = args[0]
if hasattr(request, 'request'):
# We're decorating a Django class-based-view and the first argument is actually self:
request = args[1]
try:
request.devserver_profiler.add_function(func)
request.devserver_profiler_run = True
for f in self.follow:
request.devserver_profiler.add_function(f)
request.devserver_profiler.enable_by_count()
return func(*args, **kwargs)
finally:
request.devserver_profiler.disable_by_count()
return functools.wraps(func)(profiled_func)
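# Hedged usage sketch (illustrative; the view and helper names below are
# hypothetical): decorate a view so its lines are profiled even when
# DEVSERVER_AUTO_PROFILE is disabled:
#
#     @devserver_profile(follow=[some_helper_function])
#     def my_view(request):
#         ...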
| 34.765517
| 140
| 0.629835
|
6cbdd9bad0e605956ba5eb4138840f398d935425
| 2,467
|
py
|
Python
|
kits19cnn/io/custom_transforms.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | 7
|
2019-12-19T01:10:09.000Z
|
2021-07-05T07:35:39.000Z
|
kits19cnn/io/custom_transforms.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | 5
|
2019-12-19T23:03:12.000Z
|
2020-02-06T04:18:34.000Z
|
kits19cnn/io/custom_transforms.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | 1
|
2021-03-20T06:28:37.000Z
|
2021-03-20T06:28:37.000Z
|
from albumentations.core.transforms_interface import DualTransform
import numpy as np
class CenterCrop(DualTransform):
"""
Crop the central part of the input.
Args:
height (int): height of the crop.
width (int): width of the crop.
p (float): probability of applying the transform. Default: 1.
pad_kwargs (dict): kwargs for padding when the crop size is larger than
the input image.
Targets:
image, mask, bboxes, keypoints
Image types:
uint8, float32
Note:
It is recommended to use uint8 images as input.
Otherwise the operation will require internal conversion
float32 -> uint8 -> float32 that causes worse performance.
"""
def __init__(self, height, width, always_apply=False, p=1.0,
pad_kwargs={"mode": "constant", "constant_values": 0}):
super(CenterCrop, self).__init__(always_apply, p)
self.height = height
self.width = width
self.pad_kwargs = pad_kwargs
def apply(self, img, **params):
return center_crop(img, self.height, self.width,
pad_kwargs=self.pad_kwargs)
def get_transform_init_args_names(self):
return ("height", "width")
def center_crop(img, crop_height, crop_width, dim=2, pad_kwargs={}):
"""
Center cropping 2D images (channels last)
"""
data_shape, crop_size = img.shape, (crop_height, crop_width)
# list of lower bounds for each axis
lbs = get_lbs_for_center_crop(crop_size, data_shape)
need_to_pad = [[abs(min(0, lbs[d])), abs(min(0, data_shape[d] - (lbs[d] + crop_size[d])))]
for d in range(dim)] + [[0, 0]]
# we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed
ubs = [min(lbs[d] + crop_size[d], data_shape[d]) for d in range(dim)]
lbs = [max(0, lbs[d]) for d in range(dim)]
slicer_data = [slice(0, data_shape[1])] + [slice(lbs[d], ubs[d])
for d in range(dim)]
img_cropped = img[tuple(slicer_data)]
img = np.pad(img_cropped, need_to_pad, **pad_kwargs)
return img
def get_lbs_for_center_crop(crop_size, data_shape):
"""
Fetches the lower bounds for central cropping.
Args:
crop_size: (height, width)
data_shape: (x ,y, c)
"""
lbs = []
for i in range(len(data_shape)-1):
lbs.append((data_shape[i] - crop_size[i]) // 2)
return lbs
| 35.242857
| 101
| 0.627483
|
d565a18b50a9a90ca8c7941d037217eb47fa2002
| 271
|
py
|
Python
|
RandomStringGenerator.py
|
alvin-ylt/Python
|
853128ecda7c7a387e8f392533c760adbf0c9cab
|
[
"Unlicense"
] | null | null | null |
RandomStringGenerator.py
|
alvin-ylt/Python
|
853128ecda7c7a387e8f392533c760adbf0c9cab
|
[
"Unlicense"
] | null | null | null |
RandomStringGenerator.py
|
alvin-ylt/Python
|
853128ecda7c7a387e8f392533c760adbf0c9cab
|
[
"Unlicense"
] | null | null | null |
import random
count = 0
length = int(input("How many characters do you want?"))
string = str()
char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*"
while count < length:
string = random.choice(char) + string
count += 1
print(string)
| 20.846154
| 79
| 0.715867
|
df4769927161c55b774ccb6ea24394c894a1b7a5
| 2,394
|
py
|
Python
|
simple_rl/tasks/cleanup/cleanup_state.py
|
roma-patel/simple_rl
|
721a748dae3296293b66f86cf2eda8c91885c298
|
[
"Apache-2.0"
] | null | null | null |
simple_rl/tasks/cleanup/cleanup_state.py
|
roma-patel/simple_rl
|
721a748dae3296293b66f86cf2eda8c91885c298
|
[
"Apache-2.0"
] | null | null | null |
simple_rl/tasks/cleanup/cleanup_state.py
|
roma-patel/simple_rl
|
721a748dae3296293b66f86cf2eda8c91885c298
|
[
"Apache-2.0"
] | null | null | null |
import copy
import random
from simple_rl.mdp.StateClass import State
from simple_rl.tasks.cleanup.CleanupMDPClass import CleanUpMDP
class CleanUpState(State):
def __init__(self, task, x, y, blocks=[], doors=[], rooms=[]):
'''
:param task: The given CleanUpTask
:param x: Agent x coordinate
:param y: Agent y coordinate
:param blocks: List of blocks
:param doors: List of doors
:param rooms: List of rooms
'''
self.x = x
self.y = y
self.blocks = blocks
self.doors = doors
self.rooms = rooms
self.task = task
State.__init__(self, data=[task, (x, y), blocks, doors, rooms])
def __hash__(self):
alod = [tuple(self.data[i]) for i in range(1, len(self.data))]
alod.append(self.data[0])
return hash(tuple(alod))
def __str__(self):
str_builder = "(" + str(self.x) + ", " + str(self.y) + ")\n"
str_builder += "\nBLOCKS:\n"
for block in self.blocks:
str_builder += str(block) + "\n"
str_builder += "\nDOORS:\n"
for door in self.doors:
str_builder += str(door) + "\n"
str_builder += "\nROOMS:\n"
for room in self.rooms:
str_builder += str(room) + "\n"
return str_builder
@staticmethod
def list_eq(alod1, alod2):
'''
:param alod1: First list
:param alod2: Second list
:return: A boolean indicating whether or not the lists are the same
'''
if len(alod1) != len(alod2):
return False
sa = set(alod2)
for item in alod1:
if item not in sa:
return False
return True
def __eq__(self, other):
return isinstance(other, CleanUpState) and self.x == other.x and self.y == other.y and \
self.list_eq(other.rooms, self.rooms) and self.list_eq(other.doors, self.doors) and \
self.list_eq(other.blocks, self.blocks)
def is_terminal(self):
return CleanUpMDP.is_terminal(self.task, next_state=self)
def copy(self):
new_blocks = [block.copy() for block in self.blocks]
new_rooms = [room.copy() for room in self.rooms]
new_doors = [door.copy() for door in self.doors]
return CleanUpState(self.task, self.x, self.y, new_blocks, new_doors, new_rooms)
| 32.794521
| 100
| 0.579365
|
3b977c868550ee897c8ec144d386d20166b5fc91
| 3,862
|
py
|
Python
|
egs/wenetspeech/ASR/local/preprocess_wenetspeech.py
|
pingfengluo/icefall
|
c2c3e2ba76ecf231fb2b8bfabdc1da0dac4bd377
|
[
"Apache-2.0"
] | null | null | null |
egs/wenetspeech/ASR/local/preprocess_wenetspeech.py
|
pingfengluo/icefall
|
c2c3e2ba76ecf231fb2b8bfabdc1da0dac4bd377
|
[
"Apache-2.0"
] | null | null | null |
egs/wenetspeech/ASR/local/preprocess_wenetspeech.py
|
pingfengluo/icefall
|
c2c3e2ba76ecf231fb2b8bfabdc1da0dac4bd377
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 Johns Hopkins University (Piotr Żelasko)
# Copyright 2021 Xiaomi Corp. (Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from pathlib import Path
from lhotse import CutSet, SupervisionSegment
from lhotse.recipes.utils import read_manifests_if_cached
# Similar text filtering and normalization procedure as in:
# https://github.com/SpeechColab/WenetSpeech/blob/main/toolkits/kaldi/wenetspeech_data_prep.sh
def normalize_text(
utt: str,
# punct_pattern=re.compile(r"<(COMMA|PERIOD|QUESTIONMARK|EXCLAMATIONPOINT)>"),
punct_pattern=re.compile(r"<(PERIOD|QUESTIONMARK|EXCLAMATIONPOINT)>"),
whitespace_pattern=re.compile(r"\s\s+"),
) -> str:
return whitespace_pattern.sub(" ", punct_pattern.sub("", utt))
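# e.g. normalize_text("都 是 小 孩 子 <PERIOD> 是 吧") -> "都 是 小 孩 子 是 吧" (illustrative)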
def has_no_oov(
sup: SupervisionSegment,
oov_pattern=re.compile(r"<(SIL|MUSIC|NOISE|OTHER)>"),
) -> bool:
return oov_pattern.search(sup.text) is None
def preprocess_wenet_speech():
src_dir = Path("data/manifests")
output_dir = Path("data/fbank")
output_dir.mkdir(exist_ok=True)
dataset_parts = (
"L",
"M",
"S",
"DEV",
"TEST_NET",
"TEST_MEETING",
)
logging.info("Loading manifest (may take 10 minutes)")
manifests = read_manifests_if_cached(
dataset_parts=dataset_parts,
output_dir=src_dir,
suffix="jsonl.gz",
)
assert manifests is not None
for partition, m in manifests.items():
logging.info(f"Processing {partition}")
raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz"
if raw_cuts_path.is_file():
logging.info(f"{partition} already exists - skipping")
continue
# Note this step makes the recipe different than LibriSpeech:
# We must filter out some utterances and remove punctuation
# to be consistent with Kaldi.
logging.info("Filtering OOV utterances from supervisions")
m["supervisions"] = m["supervisions"].filter(has_no_oov)
logging.info(f"Normalizing text in {partition}")
for sup in m["supervisions"]:
sup.text = normalize_text(sup.text)
# Create long-recording cut manifests.
logging.info(f"Processing {partition}")
cut_set = CutSet.from_manifests(
recordings=m["recordings"],
supervisions=m["supervisions"],
)
# Run data augmentation that needs to be done in the
# time domain.
if partition not in ["DEV", "TEST_NET", "TEST_MEETING"]:
logging.info(
f"Speed perturb for {partition} with factors 0.9 and 1.1 "
"(Perturbing may take 8 minutes and saving may take 20 minutes)"
)
cut_set = (
cut_set
+ cut_set.perturb_speed(0.9)
+ cut_set.perturb_speed(1.1)
)
logging.info(f"Saving to {raw_cuts_path}")
cut_set.to_file(raw_cuts_path)
def main():
formatter = (
"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
)
logging.basicConfig(format=formatter, level=logging.INFO)
preprocess_wenet_speech()
if __name__ == "__main__":
main()
| 33.008547
| 94
| 0.656396
|
c59360db7cbc9ba9afbe8490a17d6afc996012dc
| 10,264
|
py
|
Python
|
py27/bacpypes/task.py
|
mteter-upenn/bacpypes
|
88623988103a48a3f5c8dfd0eb0ca7ffa0bd82b6
|
[
"MIT"
] | null | null | null |
py27/bacpypes/task.py
|
mteter-upenn/bacpypes
|
88623988103a48a3f5c8dfd0eb0ca7ffa0bd82b6
|
[
"MIT"
] | null | null | null |
py27/bacpypes/task.py
|
mteter-upenn/bacpypes
|
88623988103a48a3f5c8dfd0eb0ca7ffa0bd82b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Task
"""
import sys
from time import time as _time
from heapq import heapify, heappush, heappop
from .singleton import SingletonLogging
from .debugging import DebugContents, Logging, ModuleLogger, bacpypes_debugging
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
_task_manager = None
_unscheduled_tasks = []
# only defined for linux platforms
if sys.platform in ('linux2', 'darwin'):
from .event import WaitableEvent
#
# _Trigger
#
# An instance of this class is used in the task manager to break
# the asyncore.loop() call. In this case, handle_read will
# immediately "clear" the event.
#
class _Trigger(WaitableEvent, Logging):
def handle_read(self):
if _debug: _Trigger._debug("handle_read")
# read in the character, highlander
data = self.recv(1)
if _debug: _Trigger._debug(" - data: %r", data)
else:
_Trigger = None
#
# _Task
#
class _Task(DebugContents, Logging):
_debug_contents = ('taskTime', 'isScheduled')
def __init__(self):
self.taskTime = None
self.isScheduled = False
def install_task(self, when=None, delta=None):
global _task_manager, _unscheduled_tasks
# check for delta from now
if (when is None) and (delta is not None):
if not _task_manager:
raise RuntimeError("no task manager")
when = _task_manager.get_time() + delta
# fallback to the inited value
if when is None:
when = self.taskTime
if when is None:
raise RuntimeError("schedule missing, use zero for 'now'")
self.taskTime = when
# pass along to the task manager
if not _task_manager:
_unscheduled_tasks.append(self)
else:
_task_manager.install_task(self)
def process_task(self):
raise RuntimeError("process_task must be overridden")
def suspend_task(self):
global _task_manager
# pass along to the task manager
if not _task_manager:
_unscheduled_tasks.remove(self)
else:
_task_manager.suspend_task(self)
def resume_task(self):
global _task_manager
_task_manager.resume_task(self)
def __lt__(self, other):
return id(self) < id(other)
#
# OneShotTask
#
class OneShotTask(_Task):
def __init__(self, when=None):
_Task.__init__(self)
self.taskTime = when
#
# OneShotDeleteTask
#
class OneShotDeleteTask(_Task):
def __init__(self, when=None):
_Task.__init__(self)
self.taskTime = when
#
# OneShotFunction
#
@bacpypes_debugging
def OneShotFunction(fn, *args, **kwargs):
class OneShotFunctionTask(OneShotDeleteTask):
def process_task(self):
OneShotFunction._debug("process_task %r %s %s", fn, repr(args), repr(kwargs))
fn(*args, **kwargs)
task = OneShotFunctionTask()
# if there is no task manager, postpone the install
if not _task_manager:
_unscheduled_tasks.append(task)
else:
task.install_task(_task_manager.get_time())
return task
#
# FunctionTask
#
def FunctionTask(fn, *args, **kwargs):
_log.debug("FunctionTask %r %r %r", fn, args, kwargs)
class _FunctionTask(OneShotDeleteTask):
def process_task(self):
_log.debug("process_task (%r %r %r)", fn, args, kwargs)
fn(*args, **kwargs)
task = _FunctionTask()
_log.debug(" - task: %r", task)
return task
#
# RecurringTask
#
@bacpypes_debugging
class RecurringTask(_Task):
_debug_contents = ('taskInterval', 'taskIntervalOffset')
def __init__(self, interval=None, offset=None):
if _debug: RecurringTask._debug("__init__ interval=%r offset=%r", interval, offset)
_Task.__init__(self)
# save the interval, but do not automatically install
self.taskInterval = interval
self.taskIntervalOffset = offset
def install_task(self, interval=None, offset=None):
if _debug: RecurringTask._debug("install_task interval=%r offset=%r", interval, offset)
global _task_manager, _unscheduled_tasks
# set the interval if it hasn't already been set
if interval is not None:
self.taskInterval = interval
if offset is not None:
            self.taskIntervalOffset = offset
if self.taskInterval is None:
raise RuntimeError("interval unset, use ctor or install_task parameter")
if self.taskInterval <= 0.0:
raise RuntimeError("interval must be greater than zero")
# if there is no task manager, postpone the install
if not _task_manager:
if _debug: RecurringTask._debug(" - no task manager")
_unscheduled_tasks.append(self)
else:
# offset is also in milliseconds to be consistent
if self.taskIntervalOffset:
offset = self.taskIntervalOffset / 1000.0
else:
offset = 0.0
# get ready for the next interval (aligned)
now = _task_manager.get_time()
interval = self.taskInterval / 1000.0
self.taskTime = (now - offset) + interval - ((now - offset) % interval) + offset
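            # e.g. interval=1000ms, offset=0, now=12.3s -> taskTime=13.0s (illustrative)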
if _debug: RecurringTask._debug(" - task time: %r", self.taskTime)
# install it
_task_manager.install_task(self)
#
# RecurringFunctionTask
#
@bacpypes_debugging
def RecurringFunctionTask(interval, fn, *args, **kwargs):
if _debug: RecurringFunctionTask._debug("RecurringFunctionTask %r %r %r", fn, args, kwargs)
class _RecurringFunctionTask(RecurringTask):
def __init__(self, interval):
RecurringTask.__init__(self, interval)
def process_task(self):
if _debug: RecurringFunctionTask._debug("process_task %r %r %r", fn, args, kwargs)
fn(*args, **kwargs)
task = _RecurringFunctionTask(interval)
if _debug: RecurringFunctionTask._debug(" - task: %r", task)
return task
#
# recurring_function
#
@bacpypes_debugging
def recurring_function(interval, offset=None):
def recurring_function_decorator(fn):
class _RecurringFunctionTask(RecurringTask):
def process_task(self):
if _debug: recurring_function._debug("process_task %r", fn)
fn()
def __call__(self, *args, **kwargs):
fn(*args, **kwargs)
task = _RecurringFunctionTask(interval, offset)
task.install_task()
return task
return recurring_function_decorator
#
# TaskManager
#
# @bacpypes_debugging - implicit via metaclass
class TaskManager(SingletonLogging):
def __init__(self):
if _debug: TaskManager._debug("__init__")
global _task_manager, _unscheduled_tasks
# initialize
self.tasks = []
if _Trigger:
self.trigger = _Trigger()
else:
self.trigger = None
# task manager is this instance
_task_manager = self
# there may be tasks created that couldn't be scheduled
# because a task manager wasn't created yet.
if _unscheduled_tasks:
for task in _unscheduled_tasks:
task.install_task()
def get_time(self):
if _debug: TaskManager._debug("get_time")
# return the real time
return _time()
def install_task(self, task):
if _debug: TaskManager._debug("install_task %r @ %r", task, task.taskTime)
        # if the taskTime is None it hasn't been computed correctly
if task.taskTime is None:
raise RuntimeError("task time is None")
# if this is already installed, suspend it
if task.isScheduled:
self.suspend_task(task)
# save this in the task list
heappush( self.tasks, (task.taskTime, task) )
if _debug: TaskManager._debug(" - tasks: %r", self.tasks)
task.isScheduled = True
# trigger the event
if self.trigger:
self.trigger.set()
def suspend_task(self, task):
if _debug: TaskManager._debug("suspend_task %r", task)
# remove this guy
for i, (when, curtask) in enumerate(self.tasks):
if task is curtask:
if _debug: TaskManager._debug(" - task found")
del self.tasks[i]
task.isScheduled = False
heapify(self.tasks)
break
else:
if _debug: TaskManager._debug(" - task not found")
# trigger the event
if self.trigger:
self.trigger.set()
def resume_task(self, task):
if _debug: TaskManager._debug("resume_task %r", task)
# just re-install it
self.install_task(task)
def get_next_task(self):
"""get the next task if there's one that should be processed,
and return how long it will be until the next one should be
processed."""
if _debug: TaskManager._debug("get_next_task")
# get the time
now = _time()
task = None
delta = None
if self.tasks:
# look at the first task
when, nxttask = self.tasks[0]
if when <= now:
# pull it off the list and mark that it's no longer scheduled
heappop(self.tasks)
task = nxttask
task.isScheduled = False
if self.tasks:
when, nxttask = self.tasks[0]
# peek at the next task, return how long to wait
delta = max(when - now, 0.0)
else:
delta = when - now
# return the task to run and how long to wait for the next one
return (task, delta)
def process_task(self, task):
if _debug: TaskManager._debug("process_task %r", task)
# process the task
task.process_task()
# see if it should be rescheduled
if isinstance(task, RecurringTask):
task.install_task()
elif isinstance(task, OneShotDeleteTask):
del task
| 27.370667
| 95
| 0.610678
|
4831556a399691340f18e7e3cfe75de3db145e6c
| 2,085
|
py
|
Python
|
convpy/utils.py
|
markovianhq/convpy
|
05c6319aa0817841def85db3ae327fd798452599
|
[
"BSD-3-Clause"
] | 11
|
2018-11-13T12:14:13.000Z
|
2022-03-22T07:56:22.000Z
|
convpy/utils.py
|
markovianhq/convpy
|
05c6319aa0817841def85db3ae327fd798452599
|
[
"BSD-3-Clause"
] | null | null | null |
convpy/utils.py
|
markovianhq/convpy
|
05c6319aa0817841def85db3ae327fd798452599
|
[
"BSD-3-Clause"
] | 4
|
2020-05-19T19:04:57.000Z
|
2021-07-21T02:45:08.000Z
|
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import norm
def prepare_input(y, X, end_time):
y0, y1 = y[np.isnan(y[:, 1])], y[~np.isnan(y[:, 1])]
x0, x1 = X[np.isnan(y[:, 1])], X[~np.isnan(y[:, 1])]
diagonal0, diagonal1 = coo_matrix((y0.shape[0], y0.shape[0])), coo_matrix((y1.shape[0], y1.shape[0]))
diagonal0.setdiag(np.ones(y0.shape[0]))
diagonal1.setdiag(np.ones(y1.shape[0]))
mu = get_regularization_parameter(X)
return {'y0': y0, 'y1': y1, 'x0': x0, 'x1': x1, 'end_time': end_time, 'mu': mu,
'diagonal0': diagonal0, 'diagonal1': diagonal1}
def get_regularization_parameter(X):
n = X.shape[0]
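    # regularization strength: squared Frobenius norm of X averaged over the samples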
return norm(X) ** 2 / n
def hash_all(x, mod):
x_ = np.zeros(mod)
for i in x:
x_[hash(i) % mod] += 1
return x_
def check_input_data(y):
assert (y[:, 0] >= 0.).all()
assert (y[~np.isnan(y[:, 1])][:, 0] <= y[~np.isnan(y[:, 1])][:, 1]).all()
class MultiEncoder:
def __init__(self, encoders):
"""
:param encoders: iterable of encoders with the property:
encoders[i].features is a subset of encoders[i+1].features
"""
self.encoders = encoders
self.dimension = len(encoders)
def dict_vectorizer(self, state):
num_common_feat = len(set(self.encoders[-1].features).intersection(state))
best_level, best_encoder = self.dimension, self.encoders[-1]
for level, encoder in reversed(list(enumerate(self.encoders))):
partial_features = set(encoder.features)
num_common_feat_level = len(partial_features.intersection(state))
if num_common_feat_level < num_common_feat:
break
else:
best_level, best_encoder = level, encoder
return best_level, best_encoder.dict_vectorizer(state)
class MultiEstimator:
def __init__(self, estimators):
self.estimators = estimators
def predict(self, x_):
level, x = x_
estimator = self.estimators[level]
return estimator.predict(x)
| 29.785714
| 105
| 0.614868
|
3c1a5ab391b33ec79070a7c4505788e4e1a77cc4
| 2,774
|
py
|
Python
|
validation/pull_bwa_novoalign_diffs.py
|
bgruening/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
[
"MIT"
] | 339
|
2015-01-04T13:23:04.000Z
|
2022-03-25T23:09:09.000Z
|
validation/pull_bwa_novoalign_diffs.py
|
bgruening/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
[
"MIT"
] | 39
|
2015-01-14T21:31:09.000Z
|
2021-11-18T15:15:33.000Z
|
validation/pull_bwa_novoalign_diffs.py
|
bgruening/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
[
"MIT"
] | 176
|
2015-01-10T17:40:44.000Z
|
2022-03-25T05:14:21.000Z
|
#!/usr/bin/env python
"""Extract concordant variant differences between bwa and novoalign, focusing on mapping differences.
Requires:
bedtools, pybedtools, vcflib
"""
import os
import subprocess
import sys
import pybedtools
import yaml
def main(config_file):
with open(config_file) as in_handle:
config = yaml.load(in_handle)
out_dir = config["dirs"]["work"]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
consub_vcf = get_concordant_subset(config["calls"]["bwa"],
config["calls"]["novoalign"],
config["ref"], out_dir)
if config.get("callable"):
nocall_vcf = get_nocallable_subset(consub_vcf, config["callable"]["novoalign"])
else:
nocall_vcf = consub_vcf
orig_nocall_vcf = subset_original_vcf(nocall_vcf, config["calls"]["bwa-orig"],
config["ref"])
for fname in [consub_vcf, nocall_vcf, orig_nocall_vcf]:
with open(fname) as in_handle:
total = sum([1 for line in in_handle if not line.startswith("#")])
print fname, total
def subset_original_vcf(base_vcf, orig_vcf, ref_file):
out_file = "{base}-orig.vcf".format(base=os.path.splitext(base_vcf)[0])
if not os.path.exists(out_file):
cmd = "vcfintersect -i {base_vcf} -r {ref_file} {orig_vcf} > {out_file}"
subprocess.check_call(cmd.format(**locals()), shell=True)
return out_file
def get_nocallable_subset(base_vcf, cmp_bed):
"""Retrieve subset of calls in base_vcf not in cmp_bed.
"""
out_file = "{base}-nocallable.vcf".format(base=os.path.splitext(base_vcf)[0])
if not os.path.exists(out_file):
base_bt = pybedtools.BedTool(base_vcf)
cmp_bt = pybedtools.BedTool(cmp_bed)
base_bt.intersect(cmp_bt, v=True).saveas(out_file + ".bt")
with open(out_file, "w") as out_handle:
with open(base_vcf) as in_handle:
for line in in_handle:
if line.startswith("#"):
out_handle.write(line)
with open(out_file + ".bt") as in_handle:
for line in in_handle:
out_handle.write(line)
return out_file
def get_concordant_subset(base_vcf, cmp_vcf, ref_file, out_dir):
"""Retrieve subset of calls in base_vcf not in cmp_vcf.
"""
out_file = os.path.join(out_dir, "{base}-unique.vcf"
.format(base=os.path.splitext(os.path.basename(base_vcf))[0]))
if not os.path.exists(out_file):
cmd = "vcfintersect -v -i {cmp_vcf} -r {ref_file} {base_vcf} > {out_file}"
subprocess.check_call(cmd.format(**locals()), shell=True)
return out_file
if __name__ == "__main__":
main(sys.argv[1])
| 39.070423
| 101
| 0.629416
|
54f1083328ee91bf226b2c7ab4fa94d2e6cc3a9c
| 663
|
py
|
Python
|
communication/python-multicast/receiver.py
|
pn11/benkyokai
|
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
|
[
"MIT"
] | null | null | null |
communication/python-multicast/receiver.py
|
pn11/benkyokai
|
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
|
[
"MIT"
] | 22
|
2020-03-24T16:24:47.000Z
|
2022-02-26T15:51:18.000Z
|
communication/python-multicast/receiver.py
|
pn11/benkyokai
|
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
|
[
"MIT"
] | null | null | null |
import argparse
import socket
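# Joins a multicast group and prints received datagrams, e.g. (illustrative):
#     python receiver.py 239.255.0.1 9000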
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('target_address', help='target address')
parser.add_argument('port', help='port', nargs='?')
args = parser.parse_args()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', int(args.port)))
s.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(args.target_address) + socket.inet_aton("0.0.0.0"))
while True:
print(s.recvfrom(1500))
return None
if __name__ == "__main__":
main()
| 26.52
| 84
| 0.660633
|
0baaee119169631b349028819450a7400f48192e
| 1,623
|
py
|
Python
|
aaLibrary1/Bellman_Ford_algorithm.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
aaLibrary1/Bellman_Ford_algorithm.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
aaLibrary1/Bellman_Ford_algorithm.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
"""def BellmanFord(edges, num_v, source):
    # initialize the graph (distance array)
inf = float("inf")
dist = [inf for i in range(num_v)]
dist[source - 1] = 0
    # relax the edges
for i in range(num_v):
for edge in edges:
if edge[0] != inf and dist[edge[1] - 1] > dist[edge[0] - 1] + edge[2]:
dist[edge[1] - 1] = dist[edge[0] - 1] + edge[2]
if i == num_v - 1:
return -1
return dist"""
edge = [[0,1,2],[0,2,5],[1,2,4],[1,3,6],[1,4,10],[2,3,2],[3,5,1],[4,5,3],[4,6,5],[5,6,9],
[1,0,2],[2,0,5],[2,1,4],[3,1,6],[4,1,10],[3,2,2],[5,3,1],[5,4,3],[6,4,5],[6,5,9]]
def shortest_path(edge,num_v,start):
inf = float("inf")
d = [inf for f in range(num_v)]
d[start] = 0;
while True:
update = False
for e in edge:
if d[e[0]] != inf and d[e[1]] > d[e[0]] + e[2]:
d[e[1]] = d[e[0]] + e[2]
update = True
if not update:
break
return d
print(shortest_path(edge,7,0)[6])
def Bell_shortest_path(edge, num_v, start):
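    # Bellman-Ford note (illustrative): after num_v - 1 rounds of edge relaxation
    # the distances are final; any further update signals a reachable negative cycle.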
inf = float("inf")
d = [inf for f in range(num_v)]
d[start] = 0;
for i in range(num_v):
update = False
for e in edge:
if d[e[0]] != inf and d[e[1]] > d[e[0]] + e[2]:
d[e[1]] = d[e[0]] + e[2]
update = True
if not update:
break
        if i == num_v - 2:
            k = d[-1]
        if i == num_v - 1 and k != d[-1]:
            d[num_v - 1] = -inf
            break
    return d
n = 7
print(-Bell_shortest_path(edge, n, 0)[n - 1])
#print("inf")
#print(BellmanFord(e, n, m))
| 28.982143
| 89
| 0.444239
|
66441878790d90d2abbefe1429a306a68056fa36
| 34,323
|
py
|
Python
|
builder/plotting.py
|
mscroggs/defelement.com
|
0cfc01a0cc3b801ef10ede12bf64f77a6a70efb1
|
[
"CC-BY-4.0",
"MIT"
] | 9
|
2020-12-30T17:24:46.000Z
|
2022-02-16T22:10:47.000Z
|
builder/plotting.py
|
mscroggs/defelement.com
|
0cfc01a0cc3b801ef10ede12bf64f77a6a70efb1
|
[
"CC-BY-4.0",
"MIT"
] | 85
|
2021-01-09T09:44:45.000Z
|
2022-03-26T07:34:00.000Z
|
builder/plotting.py
|
mscroggs/defelement.com
|
0cfc01a0cc3b801ef10ede12bf64f77a6a70efb1
|
[
"CC-BY-4.0",
"MIT"
] | 1
|
2022-03-28T08:15:38.000Z
|
2022-03-28T08:15:38.000Z
|
from . import settings
from datetime import datetime
import os
from cairosvg import svg2png
from symfem.finite_element import CiarletElement, DirectElement
from symfem.calculus import grad
from symfem.vectors import vdot, vsub
from symfem.symbolic import subs, x
from symfem.symbolic import PiecewiseFunction
COLORS = {"orange": "#FF8800", "blue": "#44AAFF", "green": "#55FF00",
"purple": "#DD2299", "light blue": "#BBEEFF",
"gray": "#AAAAAA"}
ENTITY_COLORS = [COLORS["orange"], COLORS["blue"], COLORS["green"], COLORS["purple"]]
all_plots = []
class NoPlot:
def to_svg(self, *args, **kwargs):
return ""
def to_tikz(self, *args, **kwargs):
return ""
def img_html(self, *args, **kwargs):
return ""
class Plot:
def __init__(self, padding=15, dim=None, id=None, desc="DefElement plot"):
self.dim = dim
self.id = None
self.desc = desc
if id is not None:
self.id = id.replace(" ", "_")
self.padding = padding
self.height = 2 * self.padding
self.width = 2 * self.padding
self.origin = [self.padding, self.padding]
self._items = []
self._zadd = 0.00001
def map_to_2d(self, point):
if self.dim is not None:
point = tuple(point)
while len(point) < self.dim:
point += (0, )
if len(point) == 3 and self.origin[1] == self.padding:
self.origin[1] += self.padding
self.height += self.padding
if len(point) == 0:
return (self.origin[0], self.origin[1]), 0
if len(point) == 1:
return (self.origin[0] + point[0], self.origin[1]), 0
if len(point) == 2:
return (self.origin[0] + point[0], self.origin[1] + point[1]), 0
if len(point) == 3:
return (
float(self.origin[0] + point[0] + point[1] / 2),
float(self.origin[1] + point[2] - 2 * point[0] / 25 + point[1] / 5)
), float(point[0] - 2 * point[1] + 12 * point[2] / 25)
def _add_line(self, start, end, z, color, width):
line = {"type": "line",
"z-value": z,
"start": start,
"end": end,
"width": width,
"color": color}
self._items.append(line)
self.width = max(self.width, line["start"][0] + self.padding, line["end"][0] + self.padding)
self.height = max(self.height, line["start"][1] + self.padding,
line["end"][1] + self.padding)
def add_line(self, start, end, color="black", width="3px"):
start, z1 = self.map_to_2d(start)
end, z2 = self.map_to_2d(end)
self._add_line(start, end, min(z1, z2), color, width)
def add_bezier(self, start, mid1, mid2, end, color="black", width="3px"):
start, z1 = self.map_to_2d(start)
mid1, _ = self.map_to_2d(mid1)
mid2, _ = self.map_to_2d(mid2)
end, z2 = self.map_to_2d(end)
bez = {"type": "bezier",
"z-value": min(z1, z2),
"start": start,
"mid1": mid1,
"mid2": mid2,
"end": end,
"width": width,
"color": color}
self._items.append(bez)
self.width = max(self.width, bez["start"][0] + self.padding, bez["end"][0] + self.padding)
self.height = max(self.height, bez["start"][1] + self.padding,
bez["end"][1] + self.padding)
def add_arrow(self, start, end, color="black", width="3px"):
start, z1 = self.map_to_2d(start)
end, z2 = self.map_to_2d(end)
a1 = [end[0] + 0.25 * (start[0] - end[0]) - 0.12 * (start[1] - end[1]),
end[1] + 0.25 * (start[1] - end[1]) + 0.12 * (start[0] - end[0])]
a2 = [end[0] + 0.25 * (start[0] - end[0]) + 0.12 * (start[1] - end[1]),
end[1] + 0.25 * (start[1] - end[1]) - 0.12 * (start[0] - end[0])]
self._add_line(start, end, min(z1, z2), color, width)
self._add_line(a1, end, min(z1, z2), color, width)
self._add_line(a2, end, min(z1, z2), color, width)
def add_math(self, text, position, anchor="center", color="black"):
position, z = self.map_to_2d(position)
math = {"type": "math",
"z-value": z + self._zadd,
"text": text,
"position": position,
"anchor": anchor,
"color": color}
self._items.append(math)
def add_to_origin(self, x=None, y=None):
if x is not None:
self.origin[0] += x
if y is not None:
self.origin[1] += y
def set_origin(self, x=None, y=None):
if x is not None:
self.origin[0] = x
if y is not None:
self.origin[1] = y
def add_axes(self, tdim):
if tdim == 1:
self.add_arrow((0, 0), (34, 0))
self.add_math("x", (37, 0), "west")
if tdim == 2:
self.add_arrow((0, 0), (34, 0))
self.add_arrow((0, 0), (0, 34))
self.add_math("x", (37, 0), "west")
self.add_math("y", (0, 40), "south")
if tdim == 3:
self.add_arrow((0, 0, 0), (34, 0, 0))
self.add_arrow((0, 0, 0), (0, 34, 0))
self.add_arrow((0, 0, 0), (0, 0, 34))
self.add_math("x", (37, 0, 0), "west")
self.add_math("y", (0, 40, 0), "south west")
self.add_math("z", (0, 0, 40), "south")
self.set_origin(x=self.width + self.padding * 3)
def add_dof_number(self, position, number, color="black"):
position, z = self.map_to_2d(position)
dofn = {"type": "dofn",
"z-value": z + self._zadd,
"number": number,
"position": position,
"color": color}
self._items.append(dofn)
def add_fill(self, vertices, color="black"):
new_v = []
z = None
for v in vertices:
v, _z = self.map_to_2d(v)
if z is None:
z = _z
z = min(z, _z)
new_v.append(v)
fill = {"type": "fill",
"z-value": z - self._zadd,
"vertices": new_v,
"color": color}
self._items.append(fill)
def to_svg(self, offset=(0, 0)):
now = datetime.now()
out = (f"<svg width='{float(self.width + offset[0])}' "
f"height='{float(self.height + offset[1])}'"
" xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>\n"
f"<title>{self.desc}</title>\n"
"<desc>This plot is from DefElement (https://defelement.com) "
"and is available under a Creative Commons Attribution "
"4.0 International (CC BY 4.0) license: "
"https://creativecommons.org/licenses/by/4.0/</desc>\n"
"<metadata id='license'>\n"
" <rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' "
"xmlns:dc='http://purl.org/dc/elements/1.1/' "
"xmlns:cc='http://web.resource.org/cc/'>\n"
" <cc:Work rdf:about=''>\n"
f" <dc:title>{self.desc}</dc:title>\n"
f" <dc:date>{now.strftime('%Y-%m-%d')}</dc:date>\n"
" <dc:creator>\n"
" <cc:Agent><dc:title>DefElement</dc:title></cc:Agent>\n"
" <cc:Agent><dc:title>Matthew Scroggs</dc:title></cc:Agent>\n"
" </dc:creator>\n"
" <dc:description>See document description</dc:description>\n"
" <cc:license rdf:resource='http://creativecommons.org/licenses/by/4.0/'/>\n"
" <dc:format>image/svg+xml</dc:format>\n"
" <dc:type rdf:resource='http://purl.org/dc/dcmitype/StillImage'/>\n"
" </cc:Work>\n"
" <cc:License rdf:about='http://creativecommons.org/licenses/by/4.0/'>\n"
" <cc:permits rdf:resource='http://web.resource.org/cc/Reproduction'/>\n"
" <cc:permits rdf:resource='http://web.resource.org/cc/Distribution'/>\n"
" <cc:permits rdf:resource='http://web.resource.org/cc/DerivativeWorks'/>\n"
" <cc:requires rdf:resource='http://web.resource.org/cc/Notice'/>\n"
" <cc:requires rdf:resource='http://web.resource.org/cc/Attribution'/>\n"
" </cc:License>\n"
" </rdf:RDF>\n"
"</metadata>\n")
self._items.sort(key=lambda x: x["z-value"])
for i in self._items:
if i["type"] == "fill":
out += "<polygon points='"
out += " ".join([f"{float(offset[0] + j)},{float(offset[1] + self.height - k)}"
for j, k in i["vertices"]])
out += f"' fill='{i['color']}' />"
elif i["type"] == "math":
assert i["color"] == "black"
out += f"<text x='{float(offset[0] + i['position'][0])}' "
out += f"y='{float(offset[1] + self.height - i['position'][1])}' "
out += "class='small' "
if "south" in i["anchor"]:
out += "dominant-baseline='text-bottom' "
elif "north" in i["anchor"]:
out += "dominant-baseline='text-top' "
else:
out += "dominant-baseline='middle' "
if "west" in i["anchor"]:
out += "text-anchor='start' "
elif "east" in i["anchor"]:
out += "text-anchor='end' "
else:
out += "text-anchor='middle' "
out += "style=\"font-family:'CMU Serif',serif;font-style:italic\">"
out += f"{i['text']}</text>\n"
elif i["type"] == "line":
out += f"<line x1='{float(offset[0] + i['start'][0])}' "
out += f"y1='{float(offset[1] + self.height - i['start'][1])}' "
out += f"x2='{float(offset[0] + i['end'][0])}' "
out += f"y2='{float(offset[1] + self.height - i['end'][1])}' "
out += f"stroke-width='{i['width']}' stroke-linecap='round' "
out += f"stroke='{i['color']}' />\n"
elif i["type"] == "bezier":
out += "<path d='"
out += f"M {float(offset[0] + i['start'][0])} "
out += f"{float(offset[1] + self.height - i['start'][1])} "
out += f"C {float(offset[0] + i['mid1'][0])} "
out += f"{float(offset[1] + self.height - i['mid1'][1])}, "
out += f" {float(offset[0] + i['mid2'][0])} "
out += f"{float(offset[1] + self.height - i['mid2'][1])}, "
out += f" {float(offset[0] + i['end'][0])} "
out += f"{float(offset[1] + self.height - i['end'][1])}' "
out += f"stroke-width='{i['width']}' stroke-linecap='round' "
out += f"stroke='{i['color']}' fill='none' />\n"
elif i["type"] == "dofn":
out += f"<circle cx='{float(offset[0] + i['position'][0])}' "
out += f"cy='{float(offset[1] + self.height - i['position'][1])}' "
out += f"r='10px' fill='white' stroke='{i['color']}' "
out += "stroke-width='2px' />"
out += f"<text x='{float(offset[0] + i['position'][0])}' "
out += f"y='{float(offset[1] + self.height - i['position'][1])}' "
out += "font-family=\"'Varela Round',sans\" "
if i["number"] >= 10:
out += "font-size='10' "
out += "text-anchor='middle' dominant-baseline='middle'"
out += f" fill='{i['color']}'>{i['number']}</text>"
else:
raise ValueError(f"Unknown item type: {i['type']}")
out += "</svg>"
return out
def to_tikz(self, offset=(0, 0), reduce_padding=0, reduce_padding_x=None, reduce_padding_y=None,
include_begin_end=True, debug_bg=False):
if reduce_padding_x is None:
reduce_padding_x = reduce_padding
if reduce_padding_y is None:
reduce_padding_y = reduce_padding
out = ("% -------------------------------------------------------\n"
"% This plot is from DefElement (https://defelement.com)\n"
"% and is available under a Creative Commons Attribution\n"
"% 4.0 International (CC BY 4.0) license:\n"
"% https://creativecommons.org/licenses/by/4.0/\n"
"% -------------------------------------------------------\n")
if include_begin_end:
out += "\\begin{tikzpicture}[x=0.2mm,y=0.2mm]\n"
colors = {}
for i in self._items:
if i["color"] not in colors:
if i["color"][0] == "#":
out += f"\\definecolor{{color{len(colors)}}}{{HTML}}{{{i['color'][1:]}}}\n"
colors[i["color"]] = f"color{len(colors)}"
else:
colors[i["color"]] = i["color"]
out += f"\\clip ({reduce_padding_x},{reduce_padding_y}) rectangle "
out += f"({self.width + offset[0] - reduce_padding_x},"
out += f"{self.height + offset[1] - reduce_padding_y});\n"
if debug_bg:
out += f"\\fill[red] ({reduce_padding_x},{reduce_padding_y}) rectangle "
out += f"({self.width + offset[0] - reduce_padding_x},"
out += f"{self.height + offset[1] - reduce_padding_y});\n"
self._items.sort(key=lambda x: x["z-value"])
for i in self._items:
if i["type"] == "fill":
out += f"\\fill[{colors[i['color']]}] "
out += " -- ".join([f"({float(offset[0] + j)},{float(offset[1] + k)})"
for j, k in i["vertices"]])
out += " -- cycle;\n"
elif i["type"] == "math":
assert i["color"] == "black"
out += f"\\node[anchor={i['anchor']}] "
out += f"at ({float(offset[0] + i['position'][0])},"
out += f"{float(offset[1] + i['position'][1])}) "
out += f"{{${i['text']}$}};\n"
elif i["type"] == "line":
out += f"\\draw[{colors[i['color']]},"
out += f"line width={i['width'].replace('px', 'pt')},line cap=round]"
out += f"({float(offset[0] + i['start'][0])},{float(offset[1] + i['start'][1])})"
out += " -- "
out += f"({float(offset[0] + i['end'][0])},{float(offset[1] + i['end'][1])});\n"
elif i["type"] == "bezier":
out += f"\\draw[{colors[i['color']]},"
out += f"line cap=round,line width={i['width'].replace('px', 'pt')}] "
out += f"({float(offset[0] + i['start'][0])},{float(offset[1] + i['start'][1])})"
out += " .. controls "
out += f"({float(offset[0] + i['mid1'][0])},{float(offset[1] + i['mid1'][1])})"
out += " and "
out += f"({float(offset[0] + i['mid2'][0])},{float(offset[1] + i['mid2'][1])})"
out += " .. "
out += f"({float(offset[0] + i['end'][0])},{float(offset[1] + i['end'][1])})"
out += ";\n"
elif i["type"] == "dofn":
out += f"\\draw[{colors[i['color']]},fill=white,line width=1.5pt] "
out += f"({float(offset[0] + i['position'][0])},"
out += f"{float(offset[1] + i['position'][1])})"
out += "circle (6pt);\n"
out += f"\\node[anchor=center] at ({float(offset[0] + i['position'][0])},"
out += f"{float(offset[1] + i['position'][1])}) {{"
if i["number"] >= 10:
out += "\\tiny"
else:
out += "\\small"
out += f"\\color{{{colors[i['color']]}}}{i['number']}}};\n"
else:
raise ValueError(f"Unknown item type: {i['type']}")
if include_begin_end:
out += "\\end{tikzpicture}"
return out
def img_html(self):
global all_plots
from .html import make_html_page
from .markup import cap_first
if self.id is None:
return self.to_svg()
if self.id not in all_plots:
svg = self.to_svg()
tikz = self.to_tikz()
with open(os.path.join(settings.htmlimg_path, f"{self.id}.svg"), "w") as f:
f.write(svg)
with open(os.path.join(settings.htmlimg_path, f"{self.id}.tex"), "w") as f:
f.write(tikz)
svg2png(bytestring=svg, write_to=f"{settings.htmlimg_path}/{self.id}.png")
svg2png(bytestring=svg, write_to=f"{settings.htmlimg_path}/{self.id}-large.png",
scale=3.0)
img_page = f"<h1>{cap_first(self.desc)}</h1>\n"
img_page += f"<center><a href='/img/{self.id}-large.png'>"
img_page += f"<img src='/img/{self.id}.png'></a></center>\n"
img_page += ("<p>"
"This image can be used under a "
"<a href='https://creativecommons.org/licenses/by/4.0/'>"
"Creative Commons Attribution 4.0 International (CC BY 4.0) license"
"</a>: if you use it anywhere, you must attribute DefElement. "
"If you use this image anywhere online, please include a link to "
"DefElement; if you use this image in a paper, please <a href='"
"/citing.html'>cite DefElement</a>."
"</p>")
img_page += "<ul>"
img_page += f"<li><a href='/img/{self.id}-large.png'>Download PNG</a></li>"
img_page += f"<li><a href='/img/{self.id}.svg'>Download SVG</a></li>"
img_page += f"<li><a href='/img/{self.id}.tex'>Download TikZ</a></li>"
img_page += "</ul>"
with open(os.path.join(settings.htmlimg_path, f"{self.id}.html"), "w") as f:
f.write(make_html_page(img_page))
all_plots.append(self.id)
return f"<a href='/img/{self.id}.html'><img src='/img/{self.id}.png'></a>"
def make_lattice(element, n, offset=False, pairs=False):
ref = element.reference
f = element.get_basis_functions()[0]
if isinstance(f, PiecewiseFunction):
m = n // 2
if offset:
assert not pairs
points = []
for piece in f.pieces:
assert len(piece[0]) == 3
og = [float(i) for i in piece[0][0]]
a0 = [float(i - j) for i, j in zip(piece[0][1], piece[0][0])]
a1 = [float(i - j) for i, j in zip(piece[0][2], piece[0][0])]
points += [(og[0] + a0[0] * (i + 0.5) / (m + 1) + a1[0] * (j + 0.5) / (m + 1),
og[1] + a0[1] * (i + 0.5) / (m + 1) + a1[1] * (j + 0.5) / (m + 1))
for i in range(m) for j in range(m - i)]
return points
else:
all_points = []
pairlist = []
s = 0
for j in range(m-1, 0, -1):
pairlist += [(i, i+1) for i in range(s, s+j)]
s += j + 1
for k in range(m + 1):
s = k
for i in range(m, k, -1):
if i != k + 1:
pairlist += [(s, s + i)]
if k != 0:
pairlist += [(s, s + i - 1)]
s += i
for piece in f.pieces:
assert len(piece[0]) == 3
og = [float(i) for i in piece[0][0]]
a0 = [float(i - j) for i, j in zip(piece[0][1], piece[0][0])]
a1 = [float(i - j) for i, j in zip(piece[0][2], piece[0][0])]
all_points.append(
[(og[0] + a0[0] * i / (m - 1) + a1[0] * j / (m - 1),
og[1] + a0[1] * i / (m - 1) + a1[1] * j / (m - 1))
for i in range(m) for j in range(m - i)]
)
return all_points, [pairlist for p in all_points]
if ref.name == "interval":
if offset:
points = [((i + 0.5) / (n + 1), ) for i in range(n)]
else:
points = [(i / (n - 1), ) for i in range(n)]
elif ref.name == "triangle":
if offset:
points = [((i + 0.5) / (n + 1), (j + 0.5) / (n + 1))
for i in range(n) for j in range(n - i)]
else:
points = [(i / (n - 1), j / (n - 1)) for i in range(n) for j in range(n - i)]
elif ref.name == "tetrahedron":
if offset:
points = [((i + 0.5) / (n + 1), (j + 0.5) / (n + 1), (k + 0.5) / (n + 1))
for i in range(n) for j in range(n - i) for k in range(n - i - j)]
else:
points = [(i / (n - 1), j / (n - 1), k / (n - 1))
for i in range(n) for j in range(n - i) for k in range(n - i - j)]
elif ref.name == "quadrilateral":
if offset:
points = [((i + 0.5) / (n + 1), (j + 0.5) / (n + 1))
for i in range(n + 1) for j in range(n + 1)]
else:
points = [(i / (n - 1), j / (n - 1)) for i in range(n) for j in range(n)]
elif ref.name == "hexahedron":
if offset:
points = [((i + 0.5) / (n + 1), (j + 0.5) / (n + 1), (k + 0.5) / (n + 1))
for i in range(n + 1) for j in range(n + 1) for k in range(n + 1)]
else:
points = [(i / (n - 1), j / (n - 1), k / (n - 1))
for i in range(n) for j in range(n) for k in range(n)]
elif ref.name == "prism":
if offset:
points = [((i + 0.5) / (n + 1), (j + 0.5) / (n + 1), (k + 0.5) / (n + 1))
for i in range(n + 1) for j in range(n + 1 - i) for k in range(n + 1)]
else:
points = [(i / (n - 1), j / (n - 1), k / (n - 1))
for i in range(n) for j in range(n - i) for k in range(n + 1)]
else:
raise ValueError("Unknown cell type.")
if not pairs:
return points
assert not offset
if ref.tdim == 1:
pairlist = [(i, i+1) for i, j in enumerate(points[:-1])]
elif ref.tdim == 2:
pairlist = []
if ref.name == "triangle":
s = 0
for j in range(n-1, 0, -1):
pairlist += [(i, i+1) for i in range(s, s+j)]
s += j + 1
for k in range(n + 1):
s = k
for i in range(n, k, -1):
if i != k + 1:
pairlist += [(s, s + i)]
if k != 0:
pairlist += [(s, s + i - 1)]
s += i
elif ref.name == "quadrilateral":
for i in range(n):
for j in range(n):
node = i * n + j
if j != n - 1:
pairlist += [(node, node + 1)]
if i != n - 1:
pairlist += [(node, node + n)]
if j != 0:
pairlist += [(node, node + n - 1)]
return points, pairlist
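# Illustrative example: make_lattice(element, 3, pairs=True) on a triangle-cell
# element returns the 6 lattice points
#   [(0, 0), (0, 0.5), (0, 1), (0.5, 0), (0.5, 0.5), (1, 0)]
# plus index pairs joining neighbouring points; plot_basis_functions uses these
# pairs to draw the wireframe of each scalar-valued basis function.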
def get_apply_scale(ref):
if ref.name == "dual polygon":
return lambda p: [j * 50 + 50 if i < 2 else j * 100 for i, j in enumerate(p)]
return lambda p: [i * 100 for i in p]
def plot_reference(ref):
apply_scale = get_apply_scale(ref)
if ref.name == "dual polygon":
ref_id = f"dual-polygon-{ref.number_of_triangles}"
else:
ref_id = ref.name
p = Plot(id=f"ref-{ref_id}", desc=f"{ref.name} reference element")
p.add_axes(ref.tdim)
for d in range(ref.tdim + 1):
for edge in ref.edges:
v1 = apply_scale(ref.vertices[edge[0]])
v2 = apply_scale(ref.vertices[edge[1]])
p.add_line(v1, v2)
for i, e in enumerate(ref.sub_entities(d)):
pos = apply_scale(
[sum(k) / len(k) for k in zip(*[ref.vertices[j] for j in e])])
p.add_dof_number(pos, i, ENTITY_COLORS[d])
p.set_origin(x=p.width + p.padding)
return p
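# plot_reference draws the wireframe of a reference cell and numbers its
# sub-entities (vertices, edges, faces, volume) at their centroids, coloured by
# topological dimension via ENTITY_COLORS.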
def plot_function(element, dof_i):
return plot_basis_functions(element)[dof_i]
def plot_basis_functions(element):
if element.range_dim == 1:
if element.domain_dim > 2:
return [NoPlot() for i in range(element.space_dim)]
else:
if element.range_dim != element.domain_dim:
return [NoPlot() for i in range(element.space_dim)]
def _norm(a):
try:
a = float(a)
return abs(a)
except TypeError:
return sum(i ** 2 for i in a) ** 0.5
def _to_float(a):
try:
return float(a)
except TypeError:
return [_to_float(b) for b in a]
if element.range_dim == 1:
points, pairs = make_lattice(element, 6, offset=False, pairs=True)
else:
points = make_lattice(element, 6, offset=True)
if element.reference.name == "dual polygon":
ref_id = f"dual-polygon-{element.reference.number_of_triangles}"
else:
ref_id = element.reference.name
f = element.get_basis_functions()[0]
if element.range_dim == 1 and isinstance(f, PiecewiseFunction):
scale = 1 # max(max(_norm(j) for j in i) for i in tab)
apply_scale = get_apply_scale(element.reference)
ps = []
for dofn, function in enumerate(element.get_basis_functions()):
p = Plot(dim=element.domain_dim + 1, padding=30,
id=f"element-{element.name}-{ref_id}-{element.order}-{dofn}",
desc=f"Basis function in a {element.name} space")
dof_entity = (-1, -1)
if isinstance(element, CiarletElement):
if len(element.dofs) > 0:
dof = element.dofs[dofn]
dof_entity = dof.entity
elif isinstance(element, DirectElement):
dof_entity = element._basis_entities[dofn]
if dof_entity[0] >= 2:
if dof_entity[0] == 2:
faces = [dof_entity[1]]
else:
faces = [i for i, _ in enumerate(element.reference.faces)]
for f in faces:
vertices = [apply_scale(element.reference.vertices[i])
for i in element.reference.faces[f]]
if len(vertices) == 4:
vertices = [vertices[0], vertices[1], vertices[3], vertices[2]]
_zadd = p._zadd
p._zadd = 5000
p.add_fill(vertices, color=COLORS["light blue"])
p._zadd = _zadd
for en, edge in enumerate(element.reference.edges):
v1 = apply_scale(element.reference.vertices[edge[0]])
v2 = apply_scale(element.reference.vertices[edge[1]])
if dof_entity[0] == 1 and dof_entity[1] == en:
p.add_line(v1, v2, color=COLORS["blue"])
else:
p.add_line(v1, v2, color=COLORS["gray"])
for pts, prs, (_, f) in zip(points, pairs, function.pieces):
evals = [subs(f, x, p) for p in pts]
deriv = grad(f, element.domain_dim)
for i, j in prs:
d_pi = tuple(2 * a / 3 + b / 3 for a, b in zip(pts[i], pts[j]))
d_pj = tuple(a / 3 + 2 * b / 3 for a, b in zip(pts[i], pts[j]))
di = vdot(subs(deriv, x, pts[i]), vsub(d_pi, pts[i]))
dj = vdot(subs(deriv, x, pts[j]), vsub(d_pj, pts[j]))
p.add_bezier(apply_scale(tuple(pts[i]) + (evals[i] / scale, )),
apply_scale(d_pi + ((evals[i] + di) / scale, )),
apply_scale(d_pj + ((evals[j] + dj) / scale, )),
apply_scale(tuple(pts[j]) + (evals[j] / scale, )),
width="2px", color=COLORS["orange"])
if isinstance(element, CiarletElement) and len(element.dofs) > 0:
if dof.dof_direction() is not None:
p.add_arrow(apply_scale(dof.dof_point()),
apply_scale([i + j / 4 for i, j in zip(dof.dof_point(),
dof.dof_direction())]),
width="2px", color=COLORS["purple"])
p.add_dof_number(apply_scale(dof.dof_point()), dofn, color=COLORS["purple"])
ps.append(p)
return ps
else:
tab = _to_float(element.tabulate_basis(points, "xyz,xyz"))
scale = max(max(_norm(j) for j in i) for i in tab)
apply_scale = get_apply_scale(element.reference)
ps = []
for dofn, function in enumerate(element.get_basis_functions()):
if element.range_dim == 1:
assert element.domain_dim <= 2
p = Plot(
dim=element.domain_dim + 1, padding=30,
id=f"element-{element.name}-{ref_id}-{element.order}-{dofn}",
desc=f"Basis function in a {element.name} space")
else:
assert element.range_dim == element.domain_dim
p = Plot(
dim=element.domain_dim, padding=30,
id=f"element-{element.name}-{ref_id}-{element.order}-{dofn}",
desc=f"Basis function in a {element.name} space")
dof_entity = (-1, -1)
if isinstance(element, CiarletElement):
if len(element.dofs) > 0:
dof = element.dofs[dofn]
dof_entity = dof.entity
elif isinstance(element, DirectElement):
dof_entity = element._basis_entities[dofn]
if dof_entity[0] >= 2:
if dof_entity[0] == 2:
faces = [dof_entity[1]]
else:
faces = [i for i, _ in enumerate(element.reference.faces)]
for f in faces:
vertices = [apply_scale(element.reference.vertices[i])
for i in element.reference.faces[f]]
if len(vertices) == 4:
vertices = [vertices[0], vertices[1], vertices[3], vertices[2]]
_zadd = p._zadd
p._zadd = 5000
p.add_fill(vertices, color=COLORS["light blue"])
p._zadd = _zadd
for en, edge in enumerate(element.reference.edges):
v1 = apply_scale(element.reference.vertices[edge[0]])
v2 = apply_scale(element.reference.vertices[edge[1]])
if dof_entity[0] == 1 and dof_entity[1] == en:
p.add_line(v1, v2, color=COLORS["blue"])
else:
p.add_line(v1, v2, color=COLORS["gray"])
evals = [i[dofn] for i in tab]
if element.range_dim == 1:
deriv = grad(function, element.domain_dim)
for i, j in pairs:
d_pi = tuple(2 * a / 3 + b / 3 for a, b in zip(points[i], points[j]))
d_pj = tuple(a / 3 + 2 * b / 3 for a, b in zip(points[i], points[j]))
di = vdot(subs(deriv, x, points[i]), vsub(d_pi, points[i]))
dj = vdot(subs(deriv, x, points[j]), vsub(d_pj, points[j]))
p.add_bezier(apply_scale(tuple(points[i]) + (evals[i] / scale, )),
apply_scale(d_pi + ((evals[i] + di) / scale, )),
apply_scale(d_pj + ((evals[j] + dj) / scale, )),
apply_scale(tuple(points[j]) + (evals[j] / scale, )),
width="2px", color=COLORS["orange"])
else:
assert element.range_dim == element.domain_dim
for pt, v in zip(points, evals):
wid = 4 * sum(i**2 for i in v) ** 0.5 / scale
p.add_arrow(apply_scale(pt),
apply_scale([i + j / (2.5 * scale) for i, j in zip(pt, v)]),
color=COLORS["orange"], width=f"{wid}px")
if isinstance(element, CiarletElement) and len(element.dofs) > 0:
if dof.dof_direction() is not None:
p.add_arrow(apply_scale(dof.dof_point()),
apply_scale([i + j / 4 for i, j in zip(dof.dof_point(),
dof.dof_direction())]),
width="2px", color=COLORS["purple"])
p.add_dof_number(apply_scale(dof.dof_point()), dofn, color=COLORS["purple"])
ps.append(p)
return ps
def _parse_point(points, n):
point = points[n].strip()
if point == "cycle":
assert n > 0
return _parse_point(points, 0)
assert point[0] == "(" and point[-1] == ")"
x, y = point[1:-1].split(",")
return float(x), float(y)
def plot_img(filename):
p = Plot(id=f"img-{filename}")
with open(os.path.join(settings.img_path, f"{filename}.img")) as f:
for line in f:
line = line.split("#")[0]
line = line.strip()
color = "black"
if line.startswith("["):
color, line = line[1:].split("]", 1)
line = line.strip()
if color in COLORS:
color = COLORS[color]
points = line.split("--")
for i in range(len(points) - 1):
p1 = _parse_point(points, i)
p2 = _parse_point(points, i + 1)
p.add_line(p1, p2, color=color, width="2px")
return p
| 44.287742
| 100
| 0.460478
|
4d8565f35fb71853b99871e728886ece4ba4a7e2
| 193
|
py
|
Python
|
python/dgl/distributed/constants.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 1
|
2022-02-23T01:35:37.000Z
|
2022-02-23T01:35:37.000Z
|
python/dgl/distributed/constants.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/distributed/constants.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | null | null | null |
"""Define all the constants used by DGL rpc"""
# Maximum size of message queue in bytes
MAX_QUEUE_SIZE = 20*1024*1024*1024
SERVER_EXIT = "server_exit"
SERVER_KEEP_ALIVE = "server_keep_alive"
| 24.125
| 46
| 0.772021
|
3d421dc0c54163354752740fdbd16dc05aae1719
| 4,680
|
py
|
Python
|
swap_start/train_fast_pick/fast_play/tf_model.py
|
yudongqiu/gomoku
|
4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06
|
[
"MIT"
] | 3
|
2018-06-12T09:03:41.000Z
|
2019-01-14T05:34:57.000Z
|
swap_start/train_fast_pick/fast_play/tf_model.py
|
yudongqiu/gomoku
|
4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06
|
[
"MIT"
] | null | null | null |
swap_start/train_fast_pick/fast_play/tf_model.py
|
yudongqiu/gomoku
|
4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import sys
import random
import tensorflow as tf
from tensorflow.keras import layers
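# note: tf.config.gpu.set_per_process_memory_fraction below appears to target an
# early TF 2.0 preview API; later releases typically expose equivalent memory
# controls under tf.config.experimental (e.g. set_memory_growth or
# set_virtual_device_configuration).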
tf.config.gpu.set_per_process_memory_fraction(0.4)
tf.keras.backend.clear_session()
class ResNetBlock(layers.Layer):
def __init__(self, filters=256, kernel_size=3):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
bn_axis = 3
# build layers
self.conv1 = layers.Conv2D(self.filters, self.kernel_size, padding='same')
self.batch1 = layers.BatchNormalization(axis=bn_axis)
self.activ1 = layers.Activation('relu')
self.conv2 = layers.Conv2D(self.filters, self.kernel_size, padding='same')
self.batch2 = layers.BatchNormalization(axis=bn_axis)
self.activ2 = layers.Activation('relu')
self.add = layers.Add()
def call(self, inputs, training=None):
x = inputs
x = self.conv1(x)
x = self.batch1(x, training=training)
x = self.activ1(x)
x = self.conv2(x)
x = self.batch2(x, training=training)
x = self.activ2(x)
x = self.add([x, inputs])
return x
def get_config(self):
return {'filters': self.filters, 'kernel_size': self.kernel_size}
class DNNModel(tf.keras.Model):
def __init__(self, n_stages=4, filters=256, kernel_size=3):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
bn_axis = 3
# build model network
self.layer_input = layers.InputLayer(input_shape=(15,15,3), name='input', dtype='float16')
self.layer_conv0 = layers.Conv2D(self.filters, self.kernel_size, padding='same')
self.layer_batch0 = layers.BatchNormalization(axis=bn_axis)
self.layer_activ0 = layers.Activation('relu')
# a list of resnet blocks
self.layer_resBlocks = [ResNetBlock(filters=self.filters, kernel_size=self.kernel_size) for _ in range(n_stages)]
# final evaluation head
self.layer_final_conv = layers.Conv2D(1, (1, 1))
self.layer_final_batch = layers.BatchNormalization(axis=bn_axis)
self.layer_final_activ = layers.Activation('relu')
self.layer_flatten = layers.Flatten()
self.layer_dense = layers.Dense(256, activation='relu')
self.layer_res = layers.Dense(1, activation='tanh', name='result')
self.training = False
def call(self, inputs, training=False):
x = inputs
training = self.training
if training:
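            # augmentation sketch: random flips and 90-degree rotations exploit the
            # symmetry of the 15x15 board, so equivalent positions are seen in
            # several orientations during training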
x = tf.image.random_flip_left_right(x)
x = tf.image.random_flip_up_down(x)
x = tf.image.rot90(x, k=random.randint(0,3))
x = self.layer_input(x)
x = self.layer_conv0(x)
x = self.layer_batch0(x, training=training)
x = self.layer_activ0(x)
for res_block in self.layer_resBlocks:
x = res_block(x, training=training)
x = self.layer_final_conv(x)
x = self.layer_final_batch(x, training=training)
x = self.layer_final_activ(x)
x = self.layer_flatten(x)
x = self.layer_dense(x)
res = self.layer_res(x)
return res
# def save(self, path):
# """ Customized save method
# This is needed because the default save method only support Sequential/Functional Model
# """
# # Save JSON config to disk
# json_config = model.to_json()
# with open('model_config.json', 'w') as json_file:
# json_file.write(json_config)
# # Save weights to disk
# model.save_weights('path_to_my_weights.h5')
# def load(self, path):
# # Reload the model from the 2 files we saved
# with open('model_config.json') as json_file:
# json_config = json_file.read()
# new_model = keras.models.model_from_json(json_config)
# self.load_weights('path_to_my_weights.h5')
def get_new_model():
model = DNNModel(n_stages=5, filters=256, kernel_size=3)
optimizer = tf.optimizers.Adagrad(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='mean_squared_error')
return model
def save_model(model, path):
model.save_weights(path)
def load_existing_model(path):
model = get_new_model()
model.load_weights(path)
return model
#
def test_model():
import numpy as np
    x_train = np.random.randint(0, 2, size=(1000,15,15,3)).astype(np.float32)  # randint(0, 1) only yields zeros; 0/1 board-like inputs assumed
y_train = np.random.random(1000)*2-1
model = get_new_model()
model.fit(x_train, y_train, epochs=10, validation_split=0.2)
model.summary()
model.evaluate(x_train, y_train)
if __name__ == '__main__':
test_model()
| 35.454545
| 121
| 0.64594
|
4d9946cb2c38e5f053752deded8e77e487c01862
| 3,420
|
py
|
Python
|
huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/error_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/error_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/error_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
class ErrorResponse:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'error_code': 'str',
'error_msg': 'str'
}
attribute_map = {
'error_code': 'error_code',
'error_msg': 'error_msg'
}
def __init__(self, error_code=None, error_msg=None):
"""ErrorResponse - a model defined in huaweicloud sdk"""
self._error_code = None
self._error_msg = None
self.discriminator = None
if error_code is not None:
self.error_code = error_code
if error_msg is not None:
self.error_msg = error_msg
@property
def error_code(self):
"""Gets the error_code of this ErrorResponse.
        Error code.
:return: The error_code of this ErrorResponse.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this ErrorResponse.
        Error code.
:param error_code: The error_code of this ErrorResponse.
:type: str
"""
self._error_code = error_code
@property
def error_msg(self):
"""Gets the error_msg of this ErrorResponse.
        Error description.
:return: The error_msg of this ErrorResponse.
:rtype: str
"""
return self._error_msg
@error_msg.setter
def error_msg(self, error_msg):
"""Sets the error_msg of this ErrorResponse.
        Error description.
:param error_msg: The error_msg of this ErrorResponse.
:type: str
"""
self._error_msg = error_msg
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
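    # to_dict above walks openapi_types, recursing into nested models via their own
    # to_dict(), and masks plain values of attributes listed in sensitive_list
    # with "****".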
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 24.782609
| 74
| 0.540058
|
30266e13f8d680972ddd2f507e28209e2be330a6
| 505
|
py
|
Python
|
venv_py36/Lib/site-packages/PyInstaller/hooks/hook-adios.py
|
PeterMoresco/RefriCalcSoft
|
1ed728ef1937fdda248cee19d97b3d13bd98af03
|
[
"MIT"
] | 1
|
2018-09-12T06:30:21.000Z
|
2018-09-12T06:30:21.000Z
|
venv_py36/Lib/site-packages/PyInstaller/hooks/hook-adios.py
|
PeterMoresco/RefriCalcSoft
|
1ed728ef1937fdda248cee19d97b3d13bd98af03
|
[
"MIT"
] | 1
|
2018-09-12T06:32:17.000Z
|
2018-09-12T19:03:50.000Z
|
venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-adios.py
|
rilakkyuma/tweetdelete
|
5ac4001b2ba7c7d87379e616c93361c2090ed4ae
|
[
"MIT"
] | 2
|
2018-12-29T07:49:59.000Z
|
2020-03-18T02:44:31.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2018, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Hook for http://pypi.python.org/pypi/adios/
"""
hiddenimports = ['adios._hl.selections']
| 29.705882
| 78
| 0.514851
|
577b0f304b3fc26f92788c81d419b85f086a9698
| 1,543
|
py
|
Python
|
front-end/MemDir.py
|
projetoemBR/teste_mgeo
|
a0078301f7dfd54431d6b51abcf092079ad0e5a3
|
[
"MIT"
] | 1
|
2021-12-09T18:33:24.000Z
|
2021-12-09T18:33:24.000Z
|
front-end/MemDir.py
|
albertoakel/teste_mgeo
|
a0078301f7dfd54431d6b51abcf092079ad0e5a3
|
[
"MIT"
] | null | null | null |
front-end/MemDir.py
|
albertoakel/teste_mgeo
|
a0078301f7dfd54431d6b51abcf092079ad0e5a3
|
[
"MIT"
] | 1
|
2021-12-09T18:34:12.000Z
|
2021-12-09T18:34:12.000Z
|
#!/usr/bin/python3
#-------- Code Dependencies ----------
#\__________General Utilities__________/
import pprint
import time
import sys
import os
import re
import numpy as np
#\__________Local functions__________/
import ipop_fun as ipop #I/P and O/P functions
import util_fun as util #Utility functions
import plt_fun as p #Plot functions
#\__________Specialized stuff__________/
from SimPEG import utils
try:
from pymatsolver import Pardiso as Solver
except:
from SimPEG import Solver
import pickle
from scipy.constants import mu_0, epsilon_0 as eps_0
#\_____________________________________/
#
# -------------- Main Program --------------
def main():
    print()
text = "Process Information"
print(text.center(60, "-"))
dummy = util.get_host_info()
pprint.pprint(dummy)
util.pline('** Total RAM: '+util.human_size(dummy['ram']))
dummy.clear()
#-- RAM monitoring
util.ramon()
#------------ Read I/P file
text = 'Open File for I/P'
print(text.center(70, "-"))
args = ipop.read_param()
dummy = args.folder + args.ipfile
util.pline([' Reading I/P file: ', dummy], width=30, align='^')
# file object-->|
with open(dummy, 'rb') as fobj:
data = pickle.load(fobj)
#-- RAM monitoring
util.ramon()
#------------ List data read
util.pline('\n Dictionary data[...] read:')
print(list(data.keys()))
#
# -------------- Controls Execution --------------
if __name__ == "__main__":
main()
#
raise SystemExit
| 25.295082
| 67
| 0.621517
|
97898460a21a12378e3473c94791d7b641c83843
| 3,113
|
py
|
Python
|
benchmarks/modeling/compound.py
|
eteq/astropy-benchmarks
|
a8d5b3f30b2f1d7252d450aad53bb0572e44a1e8
|
[
"BSD-3-Clause"
] | null | null | null |
benchmarks/modeling/compound.py
|
eteq/astropy-benchmarks
|
a8d5b3f30b2f1d7252d450aad53bb0572e44a1e8
|
[
"BSD-3-Clause"
] | null | null | null |
benchmarks/modeling/compound.py
|
eteq/astropy-benchmarks
|
a8d5b3f30b2f1d7252d450aad53bb0572e44a1e8
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from astropy.modeling import models
from astropy import units as u
x_no_units_scalar = 5
x_no_units_small = np.linspace(-4, 3, 50)
x_no_units_medium = np.linspace(-40, 300, 2000)
x_no_units_large = np.linspace(-4, 300, int(5e6))  # assumption: the original count "5e-6" looks like a typo; a large integer point count is assumed here
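# In astropy.modeling, `|` composes models in series (the left model's outputs feed
# the right model) and `&` joins models in parallel (their inputs are concatenated),
# so the 7-step pipelines below push two pixel coordinates through shift, affine,
# scale, a TAN projection and a native-to-celestial rotation.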
def time_init_7_no_units():
m = (models.Shift(-10.5) & models.Shift(-13.2) |
models.AffineTransformation2D(matrix=[[1, 0], [0, 1]],
translation=[0, 0]) |
models.Scale(.01) & models.Scale(.04) |
models.Pix2Sky_TAN() |
models.RotateNative2Celestial(5.6, -72.05, 180))
def time_init_7_with_units():
aff = models.AffineTransformation2D(matrix=[[1, 0], [0, 1]]*u.arcsec,
translation=[0, 0]*u.arcsec)
aff.input_units_equivalencies = {'x': u.pixel_scale(1*u.arcsec/u.pix),
'y': u.pixel_scale(1*u.arcsec/u.pix)}
m = (models.Shift(-10.5*u.pix) & models.Shift(-13.2*u.pix) |
aff |
models.Scale(.01*u.arcsec) & models.Scale(.04*u.arcsec) |
models.Pix2Sky_TAN() |
models.RotateNative2Celestial(5.6*u.deg, -72.05*u.deg, 180*u.deg))
class EvaluateCompoundModelNoUnits:
def setup(self):
aff = models.AffineTransformation2D(matrix=[[1, 0], [0, 1]],
translation=[0, 0])
self.model = (models.Shift(-10.5) & models.Shift(-13.2) | aff |
models.Scale(.01) & models.Scale(.04) |
models.Pix2Sky_TAN() |
models.RotateNative2Celestial(5.6, -72.05, 180))
def time_scalar(self):
r, d = self.model(x_no_units_scalar, x_no_units_scalar)
def time_small(self):
r, d = self.model(x_no_units_small, x_no_units_small)
def time_medium(self):
r, d = self.model(x_no_units_medium, x_no_units_medium)
def time_large(self):
r, d = self.model(x_no_units_large, x_no_units_large)
class EvaluateCompoundModelWithUnits:
def setup(self):
aff = models.AffineTransformation2D(matrix=[[1, 0], [0, 1]] * u.arcsec,
translation=[0, 0] * u.arcsec)
aff.input_units_equivalencies = {'x': u.pixel_scale(1 * u.arcsec/u.pix),
'y': u.pixel_scale(1 * u.arcsec/u.pix)}
self.model = (models.Shift(-10.5 * u.pix) & models.Shift(-13.2 * u.pix) |
aff |
models.Scale(.01 * u.arcsec) & models.Scale(.04 * u.deg) |
models.Pix2Sky_TAN() |
models.RotateNative2Celestial(5.6 * u.deg, -72.05 * u.deg, 180 * u.deg))
def time_scalar(self):
r, d = self.model(x_no_units_scalar * u.pix, x_no_units_scalar * u.pix)
def time_small(self):
r, d = self.model(x_no_units_small * u.pix, x_no_units_small * u.pix)
def time_medium(self):
r, d = self.model(x_no_units_medium * u.pix, x_no_units_medium * u.pix)
def time_large(self):
r, d, = self.model(x_no_units_large * u.pix, x_no_units_large * u.pix)
| 39.910256
| 94
| 0.57019
|
8eab173c69f6c7fb7b013bdd8e1f988f873be88c
| 357
|
py
|
Python
|
olintut/magic_slide1.py
|
sihrc/olintut
|
445e9a406fb05ada9db4ed25015a148a7576a3c2
|
[
"MIT"
] | null | null | null |
olintut/magic_slide1.py
|
sihrc/olintut
|
445e9a406fb05ada9db4ed25015a148a7576a3c2
|
[
"MIT"
] | null | null | null |
olintut/magic_slide1.py
|
sihrc/olintut
|
445e9a406fb05ada9db4ed25015a148a7576a3c2
|
[
"MIT"
] | null | null | null |
import time
import random
from .utils import logger
@logger
def long_running_magic_number_generator():
"""
Magical numbers are conjured through black magic
"""
time.sleep(2)
return random.random()
if __name__ == "__main__":
magic_number = long_running_magic_number_generator()
print(f"Generated magic number {magic_number}")
| 19.833333
| 56
| 0.728291
|
0005c533b9ba3373ba3d6e9320f61fe8ab955db7
| 2,081
|
py
|
Python
|
altair_transform/transform/aggregate.py
|
altair-viz/altair-transform
|
b65bf854de1e80f931e063d8fb2ec938773826fb
|
[
"MIT"
] | 29
|
2019-07-19T08:53:34.000Z
|
2022-01-19T14:07:36.000Z
|
altair_transform/transform/aggregate.py
|
altair-viz/altair-transform
|
b65bf854de1e80f931e063d8fb2ec938773826fb
|
[
"MIT"
] | 13
|
2019-07-19T03:33:07.000Z
|
2021-06-29T15:34:19.000Z
|
altair_transform/transform/aggregate.py
|
altair-viz/altair-transform
|
b65bf854de1e80f931e063d8fb2ec938773826fb
|
[
"MIT"
] | 11
|
2019-07-19T02:48:35.000Z
|
2021-11-01T00:07:41.000Z
|
import altair as alt
import numpy as np
import pandas as pd
from .visitor import visit
@visit.register(alt.AggregateTransform)
def visit_aggregate(
transform: alt.AggregateTransform, df: pd.DataFrame
) -> pd.DataFrame:
transform = transform.to_dict()
groupby = transform.get("groupby", [])
agg_cols = {}
for aggregate in transform["aggregate"]:
op = aggregate["op"]
col = aggregate["as"]
field = aggregate.get("field", df.columns[0])
if op == "argmin":
def op(col, df=df):
return df.loc[col.idxmin()].to_dict()
elif op == "argmax":
def op(col, df=df):
return df.loc[col.idxmax()].to_dict()
else:
op = AGG_REPLACEMENTS.get(op, op)
if field == "*" and field not in df.columns:
field = df.columns[0]
if op == "values":
if groupby:
agg_cols[col] = df.groupby(groupby).apply(
lambda x: x.to_dict(orient="records")
)
else:
agg_cols[col] = [df.to_dict(orient="records")]
else:
if groupby:
agg_cols[col] = df.groupby(groupby)[field].aggregate(op)
else:
agg_cols[col] = [df[field].aggregate(op)]
df = pd.DataFrame(agg_cols)
if groupby:
df = df.reset_index()
return df
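# Illustrative example: an AggregateTransform with
#   aggregate=[{"op": "mean", "field": "y", "as": "mean_y"}], groupby=["c"]
# reduces to df.groupby(["c"])["y"].aggregate("mean"), stored under the column
# "mean_y" and followed by reset_index() to restore the groupby keys as columns.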
def confidence_interval(x: np.ndarray, level: float):
from scipy import stats
return stats.t.interval(level, len(x) - 1, loc=x.mean(), scale=x.sem())
AGG_REPLACEMENTS = {
"argmin": "idxmin",
"argmax": "idxmax",
"average": "mean",
"ci0": lambda x: confidence_interval(x, 0.05),
"ci1": lambda x: confidence_interval(x, 0.95),
"distinct": "nunique",
"stderr": "sem",
"stdev": "std",
"stdevp": lambda x: x.std(ddof=0),
"missing": lambda x: x.isnull().sum(),
"q1": lambda x: x.quantile(0.25),
"q3": lambda x: x.quantile(0.75),
"valid": "count",
"variance": "var",
"variancep": lambda x: x.var(ddof=0),
}
| 27.025974
| 75
| 0.552138
|
9f843af29361c4512a5c68fb5e77dc2a08c42c23
| 1,168
|
py
|
Python
|
qcelemental/tests/utils.py
|
PeterKraus/QCElemental
|
0ac5566f9889242c2daba21921984b548d5e2bc2
|
[
"BSD-3-Clause"
] | 1
|
2020-12-21T02:52:26.000Z
|
2020-12-21T02:52:26.000Z
|
qcelemental/tests/utils.py
|
PeterKraus/QCElemental
|
0ac5566f9889242c2daba21921984b548d5e2bc2
|
[
"BSD-3-Clause"
] | null | null | null |
qcelemental/tests/utils.py
|
PeterKraus/QCElemental
|
0ac5566f9889242c2daba21921984b548d5e2bc2
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import utils_compare
def true_false_decorator(compare_fn, *args, **kwargs):
"""Turns `compare_fn` that returns `None` on success and raises
`_TestComparisonError` on failure into a function that returns
True/False, suitable for assertions in pytest.
"""
def true_false_wrapper(*args, **kwargs):
try:
compare_fn(*args, **kwargs)
except utils_compare._TestComparisonError as err:
return False
else:
return True
return true_false_wrapper
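# The wrappers below turn the compare_* helpers from utils_compare into predicates
# that can be used directly in pytest assertions, e.g. `assert compare_values(a, b)`.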
compare_values = true_false_decorator(utils_compare.compare_values)
compare_strings = true_false_decorator(utils_compare.compare_strings)
compare_integers = true_false_decorator(utils_compare.compare_integers)
#compare_matrices = true_false_decorator(utils_compare.compare_matrices)
#compare_arrays = true_false_decorator(utils_compare.compare_arrays)
compare_dicts = true_false_decorator(utils_compare.compare_dicts)
compare_molrecs = true_false_decorator(utils_compare.compare_molrecs)
def tnm():
"""Returns the name of the calling function, usually name of test case."""
return sys._getframe().f_back.f_code.co_name
| 31.567568
| 78
| 0.763699
|
7c484cfa3b87b9f12c88292c8891b262db6e1829
| 728
|
py
|
Python
|
Episode11-Menu/Pygame/circle.py
|
Inksaver/Shmup_With_Pygame_Love2D_Monogame
|
84838516d9dd9d6639b1b699dca546bfdfec73dc
|
[
"CC0-1.0"
] | 1
|
2022-02-01T04:05:04.000Z
|
2022-02-01T04:05:04.000Z
|
Episode12-Leaderboard/Pygame/circle.py
|
Inksaver/Shmup_With_Pygame_Love2D_Monogame
|
84838516d9dd9d6639b1b699dca546bfdfec73dc
|
[
"CC0-1.0"
] | null | null | null |
Episode12-Leaderboard/Pygame/circle.py
|
Inksaver/Shmup_With_Pygame_Love2D_Monogame
|
84838516d9dd9d6639b1b699dca546bfdfec73dc
|
[
"CC0-1.0"
] | null | null | null |
''' Circle class to provide circle collider'''
class Circle():
def __init__(self, x:int, y:int, radius:int) -> None:
self._x = x # x position
self._y = y # y position
self._radius = radius # circle radius
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def radius(self):
return self._radius
@x.setter
def x(self, value):
self._x = value
@y.setter
def y(self, value):
self._y = value
@radius.setter
def radius(self, value):
self._radius = value
@property
def center(self):
return [self._x, self._y]
@center.setter
def center(self, value:list):
self._x = value[0]
self._y = value[1]
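# Minimal usage sketch (hypothetical values):
#   hitbox = Circle(100, 120, 16)
#   hitbox.center = [150, 90]   # moves the collider; radius stays 16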
| 18.2
| 55
| 0.607143
|
8fc7ec44b02e5d95ae18f47099527e501711cd8b
| 11,105
|
py
|
Python
|
Python_files/INRIX_data_preprocessing_18_prepare_link_flows_for_OD_demand_estimation_Oct.py
|
jingzbu/InverseVITraffic
|
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
|
[
"MIT"
] | null | null | null |
Python_files/INRIX_data_preprocessing_18_prepare_link_flows_for_OD_demand_estimation_Oct.py
|
jingzbu/InverseVITraffic
|
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
|
[
"MIT"
] | null | null | null |
Python_files/INRIX_data_preprocessing_18_prepare_link_flows_for_OD_demand_estimation_Oct.py
|
jingzbu/InverseVITraffic
|
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__author__ = "Jing Zhang"
__email__ = "jingzbu@gmail.com"
__status__ = "Development"
from util import *
road_seg_inr_capac = zload('../temp_files/road_seg_inr_capac.pkz')
# load tmc-day-ave_speed data for AM peak of October
tmc_day_speed_dict_Oct_AM = zload('../temp_files/Oct_AM/tmc_day_speed_dict.pkz')
# load tmc-day-ave_speed data for MD of October
tmc_day_speed_dict_Oct_MD = zload('../temp_files/Oct_MD/tmc_day_speed_dict.pkz')
# load tmc-day-ave_speed data for PM peak of October
tmc_day_speed_dict_Oct_PM = zload('../temp_files/Oct_PM/tmc_day_speed_dict.pkz')
# load tmc-day-ave_speed data for NT of October
tmc_day_speed_dict_Oct_NT = zload('../temp_files/Oct_NT/tmc_day_speed_dict.pkz')
tmc_day_capac_flow_minute_dict = {}
for i in range(len(road_seg_inr_capac.tmc)):
for day in range(32)[1:]:
tmc = road_seg_inr_capac.tmc[i]
road_num = road_seg_inr_capac.road_num[i]
shape_length = road_seg_inr_capac.shape_length[i]
day = day
AB_AM_capac = road_seg_inr_capac.AB_AM_capac[i]
AB_MD_capac = road_seg_inr_capac.AB_MD_capac[i]
AB_PM_capac = road_seg_inr_capac.AB_PM_capac[i]
AB_NT_capac = road_seg_inr_capac.AB_NT_capac[i]
AM_ave_speed = tmc_day_speed_dict_Oct_AM[tmc + str(day)].ave_speed()
MD_ave_speed = tmc_day_speed_dict_Oct_MD[tmc + str(day)].ave_speed()
PM_ave_speed = tmc_day_speed_dict_Oct_PM[tmc + str(day)].ave_speed()
NT_ave_speed = tmc_day_speed_dict_Oct_NT[tmc + str(day)].ave_speed()
AM_speed_minute = tmc_day_speed_dict_Oct_AM[tmc + str(day)].speed
MD_speed_minute = tmc_day_speed_dict_Oct_MD[tmc + str(day)].speed
PM_speed_minute = tmc_day_speed_dict_Oct_PM[tmc + str(day)].speed
NT_speed_minute = tmc_day_speed_dict_Oct_NT[tmc + str(day)].speed
tmc_day_capac_flow_minute = RoadSegInrCapacFlowMinute(tmc, road_num, shape_length, day, \
AB_AM_capac, AB_MD_capac, \
AB_PM_capac, AB_NT_capac, \
AM_ave_speed, MD_ave_speed, \
PM_ave_speed, NT_ave_speed, \
AM_speed_minute, MD_speed_minute, \
PM_speed_minute, NT_speed_minute)
assert(len(tmc_day_capac_flow_minute.AM_flow_minute()) == 120)
assert(len(tmc_day_capac_flow_minute.MD_flow_minute()) == 120)
assert(len(tmc_day_capac_flow_minute.PM_flow_minute()) == 120)
assert(len(tmc_day_capac_flow_minute.NT_flow_minute()) == 120)
tmc_day_capac_flow_minute_dict[tmc + str(day)] = tmc_day_capac_flow_minute
#zdump(tmc_day_capac_flow_minute_dict, '../temp_files/tmc_day_capac_flow_minute_dict_Oct.pkz')
link_with_capac_list = list(zload('../temp_files/links_with_capac.pkz'))
import numpy as np
day = 1
link_with_capac = link_with_capac_list[0]
AM_flow_minute = list(sum([np.array(tmc_day_capac_flow_minute_dict[tmc + str(day)].AM_flow_minute()) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_AM[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]) / \
sum([np.ones(len(tmc_day_capac_flow_minute_dict[tmc + str(day)].AM_flow_minute())) * \
tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_AM[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]))
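# The link-level flow above is a travel-time-weighted average over the TMC segments
# forming the link: each per-minute TMC flow is weighted by length / speed (the time
# spent on that segment) and divided by the summed weights.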
# print(tmc, tmc_length_dict[tmc], tmc_day_speed_dict_Oct_AM[tmc + str(day)].speed[0:5])
# print(np.array(tmc_day_capac_flow_minute_dict[tmc + str(day)].AM_flow_minute())[0:5])
# print(AM_flow_minute[0:5])
# aa = [1., 2., 3.]
# bb = [0.1, 0.2, 4.0]
# print(np.array(aa) / bb)
# print(sum([np.array(aa), np.array(bb)]))
tmc_day_capac_flow_dict = zload('../temp_files/tmc_day_capac_flow_dict_Oct.pkz')
link_day_minute_Oct_dict = {}
for day in range(32)[1:]:
i = 0
for link_with_capac in link_with_capac_list:
AM_flow = sum([tmc_day_capac_flow_dict[tmc + str(day)].AM_flow() * tmc_length_dict[tmc] / \
tmc_day_speed_dict_Oct_AM[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set]) / \
sum([tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_AM[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set])
MD_flow = sum([tmc_day_capac_flow_dict[tmc + str(day)].MD_flow() * tmc_length_dict[tmc] / \
tmc_day_speed_dict_Oct_MD[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set]) / \
sum([tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_MD[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set])
PM_flow = sum([tmc_day_capac_flow_dict[tmc + str(day)].PM_flow() * tmc_length_dict[tmc] / \
tmc_day_speed_dict_Oct_PM[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set]) / \
sum([tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_PM[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set])
NT_flow = sum([tmc_day_capac_flow_dict[tmc + str(day)].NT_flow() * tmc_length_dict[tmc] / \
tmc_day_speed_dict_Oct_NT[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set]) / \
sum([tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_NT[tmc + str(day)].ave_speed() \
for tmc in link_with_capac.tmc_set])
AM_flow_minute = list(sum([np.array(tmc_day_capac_flow_minute_dict[tmc + str(day)].AM_flow_minute()) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_AM[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]) / \
sum([np.ones(len(tmc_day_capac_flow_minute_dict[tmc + str(day)].AM_flow_minute())) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_AM[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]))
MD_flow_minute = list(sum([np.array(tmc_day_capac_flow_minute_dict[tmc + str(day)].MD_flow_minute()) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_MD[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]) / \
sum([np.ones(len(tmc_day_capac_flow_minute_dict[tmc + str(day)].MD_flow_minute())) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_MD[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]))
PM_flow_minute = list(sum([np.array(tmc_day_capac_flow_minute_dict[tmc + str(day)].PM_flow_minute()) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_PM[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]) / \
sum([np.ones(len(tmc_day_capac_flow_minute_dict[tmc + str(day)].PM_flow_minute())) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_PM[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]))
NT_flow_minute = list(sum([np.array(tmc_day_capac_flow_minute_dict[tmc + str(day)].NT_flow_minute()) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_NT[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]) / \
sum([np.ones(len(tmc_day_capac_flow_minute_dict[tmc + str(day)].NT_flow_minute())) \
* tmc_length_dict[tmc] / tmc_day_speed_dict_Oct_NT[tmc + str(day)].speed \
for tmc in link_with_capac.tmc_set]))
link_with_capac_new = Link_with_Free_Flow_Time_Minute(link_with_capac.init_node, link_with_capac.term_node, \
link_with_capac.tmc_set, \
link_with_capac.AM_capac, \
link_with_capac.MD_capac, \
link_with_capac.PM_capac, \
link_with_capac.NT_capac, \
link_with_capac.free_flow_time, \
link_with_capac.length, \
AM_flow, MD_flow, PM_flow, NT_flow, \
AM_flow_minute, MD_flow_minute, \
PM_flow_minute, NT_flow_minute)
link_day_minute_Oct_dict['link_' + str(i) + '_' + str(day)] = link_with_capac_new
i = i + 1
#zdump(link_day_minute_Oct_dict, '../temp_files/link_day_minute_Oct_dict.pkz')
print(link_day_minute_Oct_dict['link_0_10'].PM_flow_minute[0:10])
link_day_minute_Oct_dict_JSON = {}
for link_idx in range(24):
for day in range(32)[1:]:
key = 'link_' + str(link_idx) + '_' + str(day)
data = {'link_idx': link_idx, 'day': day, \
'init_node': link_day_minute_Oct_dict[key].init_node, \
'term_node': link_day_minute_Oct_dict[key].term_node, \
'AM_capac': link_day_minute_Oct_dict[key].AM_capac, \
'MD_capac': link_day_minute_Oct_dict[key].MD_capac, \
'PM_capac': link_day_minute_Oct_dict[key].PM_capac, \
'NT_capac': link_day_minute_Oct_dict[key].NT_capac, \
'free_flow_time': link_day_minute_Oct_dict[key].free_flow_time, \
'length': link_day_minute_Oct_dict[key].length, \
'AM_flow': link_day_minute_Oct_dict[key].AM_flow, \
'MD_flow': link_day_minute_Oct_dict[key].MD_flow, \
'PM_flow': link_day_minute_Oct_dict[key].PM_flow, \
'NT_flow': link_day_minute_Oct_dict[key].NT_flow, \
'AM_flow_minute': link_day_minute_Oct_dict[key].AM_flow_minute, \
'MD_flow_minute': link_day_minute_Oct_dict[key].MD_flow_minute, \
'PM_flow_minute': link_day_minute_Oct_dict[key].PM_flow_minute, \
'NT_flow_minute': link_day_minute_Oct_dict[key].NT_flow_minute}
link_day_minute_Oct_dict_JSON[key] = data
import json
# Writing JSON data
with open('../temp_files/link_day_minute_Oct_dict_JSON.json', 'w') as json_file:
json.dump(link_day_minute_Oct_dict_JSON, json_file)
with open('../temp_files/link_day_minute_Oct_dict_JSON.json', 'r') as json_file:
link_day_minute_Oct_dict_JSON_ = json.load(json_file)
print(link_day_minute_Oct_dict_JSON_['link_3_9']['AM_flow_minute'][0:10])
| 57.838542
| 117
| 0.600991
|
337d997218e2f10af70b7dd5f51cc535cbaada83
| 566
|
py
|
Python
|
sunpy/timeseries/sources/tests/test_fermi_gbm.py
|
Octaves0911/sunpy
|
d3dff03fe6cc404e40f22da90200ffbb3d38c1a7
|
[
"BSD-2-Clause"
] | 1
|
2019-03-11T12:28:25.000Z
|
2019-03-11T12:28:25.000Z
|
sunpy/timeseries/sources/tests/test_fermi_gbm.py
|
Octaves0911/sunpy
|
d3dff03fe6cc404e40f22da90200ffbb3d38c1a7
|
[
"BSD-2-Clause"
] | 10
|
2017-08-10T07:55:42.000Z
|
2020-04-19T10:56:43.000Z
|
sunpy/timeseries/sources/tests/test_fermi_gbm.py
|
Octaves0911/sunpy
|
d3dff03fe6cc404e40f22da90200ffbb3d38c1a7
|
[
"BSD-2-Clause"
] | 1
|
2019-02-06T11:57:56.000Z
|
2019-02-06T11:57:56.000Z
|
import sunpy.timeseries
from sunpy.data.test import get_test_filepath
fermi_gbm_filepath = get_test_filepath('gbm.fits')
def test_implicit_fermi_gbm():
# Test a GBMSummary TimeSeries
ts_gbm = sunpy.timeseries.TimeSeries(fermi_gbm_filepath)
assert isinstance(ts_gbm, sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries)
def test_fermi_gbm():
# Test a GBMSummary TimeSeries
ts_gbm = sunpy.timeseries.TimeSeries(fermi_gbm_filepath, source='GBMSummary')
assert isinstance(ts_gbm, sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries)
| 33.294118
| 86
| 0.805654
|
7225752618305e541081aa09c7c557d909fd8dda
| 24,039
|
py
|
Python
|
blist/test/mapping_tests.py
|
FelixKleineBoesing/blist
|
29fabf804aaded386231733047eb700aff43724b
|
[
"BSD-3-Clause"
] | null | null | null |
blist/test/mapping_tests.py
|
FelixKleineBoesing/blist
|
29fabf804aaded386231733047eb700aff43724b
|
[
"BSD-3-Clause"
] | null | null | null |
blist/test/mapping_tests.py
|
FelixKleineBoesing/blist
|
29fabf804aaded386231733047eb700aff43724b
|
[
"BSD-3-Clause"
] | null | null | null |
# This file taken from Python, licensed under the Python License Agreement
# tests common to dict and UserDict
import sys
import collections.abc as collections
from blist.test import unittest
try:
from collections import UserDict # Python 3
except ImportError:
from UserDict import UserDict # Python 2
class BasicTestMappingProtocol(unittest.TestCase):
# This base class can be used to check that an object conforms to the
# mapping protocol
# Functions that can be useful to override to adapt to dictionary
# semantics
type2test = None # which class is being tested (overwrite in subclasses)
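    # A concrete test case overrides type2test (and, where needed, _reference or
    # _empty_mapping); for example, a dict-backed suite would set `type2test = dict`.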
def _reference(self): # pragma: no cover
"""Return a dictionary of values which are invariant by storage
in the object under test."""
return {1:2, "key1":"value1", "key2":(1,2,3)}
def _empty_mapping(self):
"""Return an empty mapping object"""
return self.type2test()
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.reference = self._reference().copy()
# A (key, value) pair not in the mapping
key, value = self.reference.popitem()
self.other = {key:value}
# A (key, value) pair in the mapping
key, value = self.reference.popitem()
self.inmapping = {key:value}
self.reference[key] = value
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) #workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p: # pragma: no cover
p = p1
#Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = list(self.other.keys())[0]
self.failUnlessRaises(KeyError, lambda:d[knownkey])
#len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
#__contains__
for k in self.reference:
self.assert_(k in d)
for k in self.other:
self.failIf(k in d)
#cmp
self.assertEqual(p, p)
self.assertEqual(d, d)
self.assertNotEqual(p, d)
self.assertNotEqual(d, p)
#__non__zero__
if p: self.fail("Empty mapping must compare to False")
if not d: self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
if sys.version_info[0] < 3: # pragma: no cover
self.assert_(hasattr(iter, 'next'))
else: # pragma: no cover
self.assert_(hasattr(iter, '__next__'))
self.assert_(hasattr(iter, '__iter__'))
x = list(iter)
self.assert_(set(x)==set(lst)==set(ref))
check_iterandlist(iter(d.keys()), list(d.keys()),
self.reference.keys())
check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d.values()), list(d.values()),
self.reference.values())
check_iterandlist(iter(d.items()), list(d.items()),
self.reference.items())
#get
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.failIf(knownkey in d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
#Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.failUnlessRaises(KeyError, lambda:p[key])
p = self._empty_mapping()
#update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
items = list(p.items())
p = self._empty_mapping()
p.update(items)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
#setdefault
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
#pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.failIf(knownkey in d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.failIf(knownkey in d)
self.assertEqual(d.pop(knownkey, default), default)
#popitem
key, value = d.popitem()
self.failIf(key in d)
self.assertEqual(value, self.reference[key])
p=self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
def test_constructor(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
def test_bool(self):
self.assert_(not self._empty_mapping())
self.assert_(self.reference)
self.assert_(bool(self._empty_mapping()) is False)
self.assert_(bool(self.reference) is True)
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self.reference
self.assert_(list(self.inmapping.keys())[0] in d.keys())
self.assert_(list(self.other.keys())[0] not in d.keys())
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._empty_mapping()
self.assertEqual(list(d.values()), [])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._empty_mapping()
self.assertEqual(list(d.items()), [])
self.assertRaises(TypeError, d.items, None)
def test_len(self):
d = self._empty_mapping()
self.assertEqual(len(d), 0)
def test_getitem(self):
d = self.reference
self.assertEqual(d[list(self.inmapping.keys())[0]],
list(self.inmapping.values())[0])
self.assertRaises(TypeError, d.__getitem__)
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(list(d.items()), list(self.other.items()))
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# Iterator
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self):
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = sorted(d.items())
i2 = sorted(self.reference.items())
self.assertEqual(i1, i2)
class Exc(Exception): pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return 'a'
raise Exc
next = __next__
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def __next__(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
else: # pragma: no cover
raise StopIteration
next = __next__
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
next = __next__
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
def test_get(self):
d = self._empty_mapping()
self.assert_(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
d = self.reference
self.assert_(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
self.assertEqual(d.get(list(self.inmapping.keys())[0]),
list(self.inmapping.values())[0])
self.assertEqual(d.get(list(self.inmapping.keys())[0], 3),
list(self.inmapping.values())[0])
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = self._empty_mapping()
self.assertRaises(TypeError, d.setdefault)
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
self.assertRaises(TypeError, d.popitem, 42)
def test_pop(self):
d = self._empty_mapping()
k, v = list(self.inmapping.items())[0]
d[k] = v
self.assertRaises(KeyError, d.pop, list(self.other.keys())[0])
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
class TestMappingProtocol(BasicTestMappingProtocol):
def test_constructor(self):
BasicTestMappingProtocol.test_constructor(self)
self.assert_(self._empty_mapping() is not self._empty_mapping())
self.assertEqual(self.type2test(x=1, y=2), self._full_mapping({"x": 1, "y": 2}))
def test_bool(self):
BasicTestMappingProtocol.test_bool(self)
self.assert_(not self._empty_mapping())
self.assert_(self._full_mapping({"x": "y"}))
self.assert_(bool(self._empty_mapping()) is False)
self.assert_(bool(self._full_mapping({"x": "y"})) is True)
def test_keys(self):
BasicTestMappingProtocol.test_keys(self)
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self._full_mapping({'a': 1, 'b': 2})
k = d.keys()
self.assert_('a' in k)
self.assert_('b' in k)
self.assert_('c' not in k)
def test_values(self):
BasicTestMappingProtocol.test_values(self)
d = self._full_mapping({1:2})
self.assertEqual(list(d.values()), [2])
def test_items(self):
BasicTestMappingProtocol.test_items(self)
d = self._full_mapping({1:2})
self.assertEqual(list(d.items()), [(1, 2)])
def test_contains(self):
d = self._empty_mapping()
self.assert_(not ('a' in d))
self.assert_('a' not in d)
d = self._full_mapping({'a': 1, 'b': 2})
self.assert_('a' in d)
self.assert_('b' in d)
self.assert_('c' not in d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
BasicTestMappingProtocol.test_len(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(len(d), 2)
def test_getitem(self):
BasicTestMappingProtocol.test_getitem(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, self._full_mapping({'a': 4, 'c': 3}))
self.assertRaises(TypeError, d.__getitem__)
def test_clear(self):
d = self._full_mapping({1:1, 2:2, 3:3})
d.clear()
self.assertEqual(d, self._full_mapping({}))
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
BasicTestMappingProtocol.test_update(self)
# mapping argument
d = self._empty_mapping()
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, self._full_mapping({1:1, 2:2, 3:3}))
# no argument
d.update()
self.assertEqual(d, self._full_mapping({1:1, 2:2, 3:3}))
# keyword arguments
d = self._empty_mapping()
d.update(x=100)
d.update(y=20)
d.update(x=1, y=2, z=3)
self.assertEqual(d, self._full_mapping({"x":1, "y":2, "z":3}))
# item sequence
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)])
self.assertEqual(d, self._full_mapping({"x":100, "y":20}))
# Both item sequence and keyword arguments
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)], x=1, y=2)
self.assertEqual(d, self._full_mapping({"x":1, "y":2}))
# iterator
d = self._full_mapping({1:3, 2:4})
d.update(self._full_mapping({1:2, 3:4, 5:6}).items())
self.assertEqual(d, self._full_mapping({1:2, 2:4, 3:4, 5:6}))
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, self._full_mapping({1:1, 2:2, 3:3}))
def test_fromkeys(self):
self.assertEqual(self.type2test.fromkeys('abc'), self._full_mapping({'a':None, 'b':None, 'c':None}))
d = self._empty_mapping()
self.assert_(not(d.fromkeys('abc') is d))
self.assertEqual(d.fromkeys('abc'), self._full_mapping({'a':None, 'b':None, 'c':None}))
self.assertEqual(d.fromkeys((4,5),0), self._full_mapping({4:0, 5:0}))
self.assertEqual(d.fromkeys([]), self._full_mapping({}))
def g():
yield 1
self.assertEqual(d.fromkeys(g()), self._full_mapping({1:None}))
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(self.type2test): pass
self.assertEqual(dictlike.fromkeys('a'), self._full_mapping({'a':None}))
self.assertEqual(dictlike().fromkeys('a'), self._full_mapping({'a':None}))
self.assert_(dictlike.fromkeys('a').__class__ is dictlike)
self.assert_(dictlike().fromkeys('a').__class__ is dictlike)
# FIXME: the following won't work with UserDict, because it's an old style class
# self.assert_(type(dictlike.fromkeys('a')) is dictlike)
class mydict(self.type2test):
def __new__(cls):
return UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
# FIXME: the following won't work with UserDict, because it's an old style class
# self.assert_(isinstance(ud, collections.UserDict))
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(self.type2test):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
next = __next__
self.assertRaises(Exc, self.type2test.fromkeys, BadSeq())
class baddict2(self.type2test):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
def test_copy(self):
d = self._full_mapping({1:1, 2:2, 3:3})
self.assertEqual(d.copy(), self._full_mapping({1:1, 2:2, 3:3}))
d = self._empty_mapping()
self.assertEqual(d.copy(), d)
self.assert_(isinstance(d.copy(), d.__class__))
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
BasicTestMappingProtocol.test_get(self)
d = self._empty_mapping()
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
d = self._full_mapping({'a' : 1, 'b' : 2})
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
def test_setdefault(self):
BasicTestMappingProtocol.test_setdefault(self)
d = self._empty_mapping()
self.assert_(d.setdefault('key0') is None)
d.setdefault('key0', [])
self.assert_(d.setdefault('key0') is None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
def test_popitem(self):
BasicTestMappingProtocol.test_popitem(self)
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = self._empty_mapping()
b = self._empty_mapping()
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assert_(not(copymode < 0 and ta != tb))
self.assert_(not a)
self.assert_(not b)
def test_pop(self):
BasicTestMappingProtocol.test_pop(self)
# Tests for pop with specified key
d = self._empty_mapping()
k, v = 'abc', 'def'
# verify longs/ints get same value when key > 32 bits (for 64-bit archs)
# see SF bug #689659
x = 4503599627370496
y = 4503599627370496
h = self._full_mapping({x: 'anything', y: 'something else'})
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
class TestHashMappingProtocol(TestMappingProtocol):
def test_getitem(self):
TestMappingProtocol.test_getitem(self)
class Exc(Exception): pass
class BadEq(object):
def __eq__(self, other): # pragma: no cover
raise Exc()
def __hash__(self):
return 24
d = self._empty_mapping()
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_fromkeys(self):
TestMappingProtocol.test_fromkeys(self)
class mydict(self.type2test):
def __new__(cls):
return UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assert_(isinstance(ud, UserDict))
def test_pop(self):
TestMappingProtocol.test_pop(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self): # pragma: no cover
d = self._empty_mapping()
d[1] = 1
try:
for i in d:
d[i+1] = 1
except RuntimeError:
pass
else:
self.fail("changing dict size during iteration doesn't raise Error")
def test_repr(self): # pragma: no cover
d = self._empty_mapping()
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = self._empty_mapping()
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = self._full_mapping({1: BadRepr()})
self.assertRaises(Exc, repr, d)
def test_eq(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
self.assertEqual(self._full_mapping({1: 2}),
self._full_mapping({1: 2}))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 1
d1 = self._full_mapping({BadCmp(): 1})
d2 = self._full_mapping({1: 1})
self.assertRaises(Exc, lambda: BadCmp()==1)
#self.assertRaises(Exc, lambda: d1==d2)
def test_setdefault(self):
TestMappingProtocol.test_setdefault(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
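# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal concrete test case built on the protocol classes above, assuming
# the mapping under test is a plain dict.  The hook names (_reference,
# _empty_mapping, type2test) mirror what the protocol methods call; the
# inherited _full_mapping is assumed to copy the given data into an empty
# mapping, as its tail above suggests.
class DictMappingTest(TestHashMappingProtocol):
    type2test = dict
    def _reference(self):
        # Needs at least two items: __init__ pops one pair for self.other
        # and one pair for self.inmapping.
        return {"1": "2", "key1": "value1", "key2": (1, 2, 3)}
    def _empty_mapping(self):
        return dict()
# unittest.main() would then collect DictMappingTest like any other TestCase.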
| 35.351471
| 109
| 0.538833
|
568e416b30dd468dda686bc8ef7923f2ec457fa3
| 3,989
|
py
|
Python
|
web/manager.py
|
tt20050510/howard-dfs-web
|
7cd17847e17fa69e3ad0be6c797ff07434b2ae7f
|
[
"Apache-2.0"
] | null | null | null |
web/manager.py
|
tt20050510/howard-dfs-web
|
7cd17847e17fa69e3ad0be6c797ff07434b2ae7f
|
[
"Apache-2.0"
] | null | null | null |
web/manager.py
|
tt20050510/howard-dfs-web
|
7cd17847e17fa69e3ad0be6c797ff07434b2ae7f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import json, os, configparser
import uuid
from flask import Blueprint, render_template, session, request, jsonify
from empty.TaskEmpty import Task
from timing.thread_task import JobTime
from utils import cf
from web import user_page, user_data
jt = JobTime()
TaskSession = jt.TaskSession
manager = Blueprint("/manager", __name__)
@manager.route("/", methods=["GET"])
@user_page
def index():
return render_template("manage/manage.html")
@manager.route("/hello.page", methods=["GET"])
@user_page
def hello():
return render_template("manage/hello.html")
@manager.route("/log", methods=["GET"])
@user_page
def log_page():
return render_template("manage/log.html")
@manager.route("/login", methods=["POST"])
def login():
data = request.form
user = {x[0]: x[1] for x in cf.items(section="admin")}
print(data)
if user.get("user") == data.get("username") and user.get("password") == data.get("password"):
session["user_login"] = 1
        res_json = {
            "status": True,
            "msg": "Login successful"
        }
else:
        res_json = {
            "status": False,
            "msg": "Login failed: wrong username or password"
        }
return json.dumps(res_json, check_circular=False, ensure_ascii=False)
@manager.route("/task", methods=["GET"])
@user_page
def task():
return render_template("manage/task.html")
###
# Task control endpoints
###
@manager.route("/task.json", methods=["GET", "POST"])
@user_page
def task_data():
res_json = {
"msg": "",
"count": TaskSession.query(Task).count(),
"code": 0,
"data": [_.to_dict() for _ in TaskSession.query(Task).all()]
}
return str(json.dumps(res_json, check_circular=False, ensure_ascii=False))
@manager.route("/enable.json", methods=["POST"])
@user_data
def enable_json():
data = request.form
task_id = data.get("task_id", "")
enable = True if data.get("enable", False) else False
if task_id == "":
        return jsonify({"status": False, "msg": "Invalid parameters"})
TaskSession.query(Task).filter_by(task_id=task_id).update({'enable': enable, })
TaskSession.commit()
jt.initTask()
    return jsonify({"status": True, "msg": "Updated successfully"})
@manager.route("/addTask", methods=["GET", "POST"])
@user_data
def addTask():
data = request.form
content = data.get("content")
time = data.get("time")
label = data.get("label")
task_id = str(uuid.uuid4())
command = data.get("command", "")
enable = data.get("enable") is not None
mark = data.get("mark")
TaskSession.add(Task(
task_id=task_id,
enable=enable,
content=content,
time=time,
label=label,
command=command,
mark=mark
))
TaskSession.commit()
jt.initTask()
    return str(json.dumps({
        "status": True,
        "msg": "Added successfully"
    }, check_circular=False, ensure_ascii=False))
@manager.route("/editTask", methods=["GET", "POST"])
@user_data
def editTask():
data = request.form
content = data.get("content")
time = data.get("time")
label = data.get("label")
command = data.get("command", "")
enable = data.get("enable") is not None
mark = data.get("mark")
task_id = data.get("task_id")
TaskSession.query(Task).filter_by(task_id=task_id).update({
"enable": enable,
"content": content,
"time": time,
"label": label,
"command": command,
"mark": mark
})
TaskSession.commit()
jt.initTask()
    return str(json.dumps({
        "status": True,
        "msg": "Updated successfully"
    }, check_circular=False, ensure_ascii=False))
@manager.route("/delTask", methods=["GET", "POST"])
@user_data
def delTask():
data = request.form
task_id = data.get("task_id")
TaskSession.query(Task).filter_by(task_id=task_id).delete()
TaskSession.commit()
jt.initTask()
    return str(json.dumps({
        "status": True,
        "msg": "Deleted successfully"
    }, check_circular=False, ensure_ascii=False))
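# --- Hypothetical usage sketch (not part of the original module) ---
# How this blueprint might be mounted on an application.  The app factory
# below is an assumption for illustration; the real project registers the
# blueprint in its own entry point.
#
# from flask import Flask
# from web.manager import manager
#
# app = Flask(__name__)
# app.secret_key = "change-me"                    # views rely on session
# app.register_blueprint(manager, url_prefix="/manager")
# app.run(debug=True)                             # serves GET /manager/task etc.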
| 24.623457
| 97
| 0.611933
|
7e1cce8847061fbcf61cf94b29dc747358d54df4
| 1,086
|
py
|
Python
|
spacy/ml/extract_ngrams.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 2
|
2017-06-23T20:54:31.000Z
|
2022-01-06T08:11:49.000Z
|
spacy/ml/extract_ngrams.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-03-01T19:01:37.000Z
|
2021-03-01T19:01:37.000Z
|
spacy/ml/extract_ngrams.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-06-21T07:17:48.000Z
|
2021-06-21T07:17:48.000Z
|
from thinc.api import Model
from ..attrs import LOWER
def extract_ngrams(ngram_size: int, attr: int = LOWER) -> Model:
model = Model("extract_ngrams", forward)
model.attrs["ngram_size"] = ngram_size
model.attrs["attr"] = attr
return model
def forward(model: Model, docs, is_train: bool):
batch_keys = []
batch_vals = []
for doc in docs:
unigrams = model.ops.asarray(doc.to_array([model.attrs["attr"]]))
ngrams = [unigrams]
for n in range(2, model.attrs["ngram_size"] + 1):
ngrams.append(model.ops.ngrams(n, unigrams))
keys = model.ops.xp.concatenate(ngrams)
keys, vals = model.ops.xp.unique(keys, return_counts=True)
batch_keys.append(keys)
batch_vals.append(vals)
lengths = model.ops.asarray([arr.shape[0] for arr in batch_keys], dtype="int32")
batch_keys = model.ops.xp.concatenate(batch_keys)
batch_vals = model.ops.asarray(model.ops.xp.concatenate(batch_vals), dtype="f")
def backprop(dY):
return []
return (batch_keys, batch_vals, lengths), backprop
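# --- Hypothetical usage sketch (not part of the original module) ---
# Feeding Doc objects through the model yields the unique n-gram hash keys,
# their counts and the per-doc lengths.  The blank pipeline below is an
# assumption for illustration.
#
# import spacy
# nlp = spacy.blank("en")
# docs = [nlp("the cat sat on the mat"), nlp("dogs bark")]
# model = extract_ngrams(ngram_size=2)
# (keys, vals, lengths), backprop = model(docs, is_train=False)
# # keys/vals are concatenated over the batch; lengths[i] gives the number
# # of (key, count) pairs that belong to docs[i].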
| 32.909091
| 84
| 0.6593
|
c9fd50715f5f3c2dea6b1ffa957fa7fe1f219c1c
| 5,326
|
py
|
Python
|
python/qisrc/test/test_sync_git.py
|
aldebaran/qibuild
|
efea6fa3744664348717fe5e8df708a3cf392072
|
[
"BSD-3-Clause"
] | 51
|
2015-01-05T14:35:13.000Z
|
2021-07-27T06:46:59.000Z
|
python/qisrc/test/test_sync_git.py
|
aldebaran/qibuild
|
efea6fa3744664348717fe5e8df708a3cf392072
|
[
"BSD-3-Clause"
] | 104
|
2015-04-09T10:48:42.000Z
|
2020-09-16T16:33:29.000Z
|
python/qisrc/test/test_sync_git.py
|
aldebaran/qibuild
|
efea6fa3744664348717fe5e8df708a3cf392072
|
[
"BSD-3-Clause"
] | 46
|
2015-01-05T14:35:16.000Z
|
2022-02-13T20:39:36.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test Sync Git """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
from qisrc.git_config import Branch
def create_foo(git_server, tmpdir, test_git):
""" Create Foo """
foo_git = test_git(tmpdir.join("foo").strpath)
foo_repo = git_server.create_repo("foo.git")
foo_git.clone(foo_repo.clone_url)
return foo_git
def test_up_to_date(git_server, tmpdir, test_git):
""" Test Up To Date """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
foo_git.sync_branch(branch)
def test_fast_forward(git_server, tmpdir, test_git):
""" Test Fast Forward """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
git_server.push_file("foo.git", "README", "README on master")
foo_git.sync_branch(branch)
assert foo_git.get_current_branch() == "master"
assert foo_git.read_file("README") == "README on master"
def test_rebase_by_default(git_server, tmpdir, test_git):
""" Test Rebase By Default """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
git_server.push_file("foo.git", "README", "README on master")
foo_git.commit_file("bar", "bar on master")
foo_git.sync_branch(branch)
assert foo_git.get_current_branch() == "master"
assert foo_git.read_file("README") == "README on master"
assert foo_git.read_file("bar") == "bar on master"
rc, head = foo_git.call("show", "HEAD", raises=False)
assert rc == 0
assert "Merge" not in head
def test_skip_if_unclean(git_server, tmpdir, test_git):
""" Test Skip If UnClean """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
git_server.push_file("foo.git", "README", "README on master")
foo_git.sync_branch(branch)
foo_git.root.join("README").write("changing README")
(res, message) = foo_git.sync_branch(branch)
assert foo_git.read_file("README") == "changing README"
assert res is None
assert "unstaged changes" in message
def test_do_not_call_rebase_abort_when_reset_fails(git_server, tmpdir, test_git):
""" Test Do Not Call Rebase Abort When Reset Fails """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
git_server.push_file("foo.git", "README", "README on master")
foo_path = foo_git.repo
index_lock = os.path.join(foo_path, ".git", "index.lock")
with open(index_lock, "w") as fp:
fp.write("")
(res, message) = foo_git.sync_branch(branch)
assert res is False
assert "rebase --abort" not in message
def test_push_nonfastforward(git_server, tmpdir, test_git):
""" Test Push No Fast Forward """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
git_server.push_file("foo.git", "README", "README on master v1")
foo_git.sync_branch(branch)
git_server.push_file("foo.git", "README", "README on master v2",
fast_forward=False)
(res, _message) = foo_git.sync_branch(branch)
assert res is True
assert foo_git.read_file("README") == "README on master v2"
def test_run_abort_when_rebase_fails(git_server, tmpdir, test_git):
""" Test Run Abort When Rebase Fails """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
git_server.push_file("foo.git", "README", "README on master v1")
foo_git.sync_branch(branch)
git_server.push_file("foo.git", "README", "README on master v2",
fast_forward=False)
foo_git.commit_file("unrelated.txt", "Unrelated changes")
(res, message) = foo_git.sync_branch(branch)
assert res is False
assert foo_git.get_current_branch() is not None
assert "Rebase failed" in message
assert foo_git.read_file("unrelated.txt") == "Unrelated changes"
assert foo_git.read_file("README") == "README on master v1"
def test_fail_if_empty(tmpdir, test_git):
""" Test Fail If Empty """
foo_git = test_git(tmpdir.strpath)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
foo_git.set_tracking_branch("master", "origin") # repo empty: fails
(res, message) = foo_git.sync_branch(branch)
assert res is None
assert "no commits" in message
def test_clean_error_when_fetch_fails(git_server, tmpdir, test_git):
""" Test Clean Error When Fetch Fails """
foo_git = create_foo(git_server, tmpdir, test_git)
branch = Branch()
branch.name = "master"
branch.tracks = "origin"
git_server.push_file("foo.git", "README", "README on master")
git_server.srv.remove()
res, message = foo_git.sync_branch(branch)
assert res is False
assert "Fetch failed" in message
| 35.744966
| 84
| 0.686632
|
3e3e307d86193ce5d3a5013b5cc0ad0b6907faeb
| 4,412
|
py
|
Python
|
gluoncv/utils/export_helper.py
|
PistonY/gluon-cv
|
aff5c36c0a1985350d32b766df5644e5648f4d13
|
[
"Apache-2.0"
] | 13
|
2019-03-04T13:26:58.000Z
|
2020-12-15T12:45:42.000Z
|
gluoncv/utils/export_helper.py
|
PistonY/gluon-cv
|
aff5c36c0a1985350d32b766df5644e5648f4d13
|
[
"Apache-2.0"
] | 1
|
2018-09-20T19:31:37.000Z
|
2018-09-20T19:31:37.000Z
|
gluoncv/utils/export_helper.py
|
PistonY/gluon-cv
|
aff5c36c0a1985350d32b766df5644e5648f4d13
|
[
"Apache-2.0"
] | 3
|
2019-03-06T02:11:20.000Z
|
2021-03-05T09:09:28.000Z
|
"""Helper utils for export HybridBlock to symbols."""
from __future__ import absolute_import
import mxnet as mx
from mxnet.base import MXNetError
from mxnet.gluon import HybridBlock
from mxnet.gluon import nn
class _DefaultPreprocess(HybridBlock):
"""Default preprocess block used by GluonCV.
The default preprocess block includes:
- mean [123.675, 116.28, 103.53]
- std [58.395, 57.12, 57.375]
- transpose to (B, 3, H, W)
It is used to transform from resized original images with shape (1, H, W, 3) or (B, H, W, 3)
in range (0, 255) and RGB color format.
"""
def __init__(self, **kwargs):
super(_DefaultPreprocess, self).__init__(**kwargs)
with self.name_scope():
mean = mx.nd.array([123.675, 116.28, 103.53]).reshape((1, 1, 1, 3))
scale = mx.nd.array([58.395, 57.12, 57.375]).reshape((1, 1, 1, 3))
self.init_mean = self.params.get_constant('init_mean', mean)
self.init_scale = self.params.get_constant('init_scale', scale)
# pylint: disable=arguments-differ
def hybrid_forward(self, F, x, init_mean, init_scale):
x = F.broadcast_minus(x, init_mean)
x = F.broadcast_div(x, init_scale)
x = F.transpose(x, axes=(0, 3, 1, 2))
return x
def export_block(path, block, data_shape=None, epoch=0, preprocess=True, layout='HWC',
ctx=mx.cpu()):
"""Helper function to export a HybridBlock to symbol JSON to be used by
    `SymbolBlock.imports`, `mxnet.mod.Module`, or the C++ interface.
Parameters
----------
path : str
Path to save model.
Two files path-symbol.json and path-xxxx.params will be created,
where xxxx is the 4 digits epoch number.
block : mxnet.gluon.HybridBlock
The hybridizable block. Note that normal gluon.Block is not supported.
data_shape : tuple of int, default is None
Fake data shape just for export purpose, in format (H, W, C).
If you don't specify ``data_shape``, `export_block` will try use some common data_shapes,
e.g., (224, 224, 3), (256, 256, 3), (299, 299, 3), (512, 512, 3)...
If any of this ``data_shape`` goes through, the export will succeed.
epoch : int
Epoch number of saved model.
preprocess : mxnet.gluon.HybridBlock, default is True.
Preprocess block prior to the network.
By default (True), it will subtract mean [123.675, 116.28, 103.53], divide
std [58.395, 57.12, 57.375], and convert original image (B, H, W, C and range [0, 255]) to
tensor (B, C, H, W) as network input. This is the default preprocess behavior of all GluonCV
pre-trained models.
You can use custom pre-process hybrid block or disable by set ``preprocess=None``.
layout : str, default is 'HWC'
The layout for raw input data. By default is HWC. Supports 'HWC' and 'CHW'.
Note that image channel order is always RGB.
ctx: mx.Context, default mx.cpu()
Network context.
Returns
-------
None
"""
# input image layout
if data_shape is None:
data_shapes = [(s, s, 3) for s in (224, 256, 299, 300, 320, 416, 512, 600)]
else:
data_shapes = [data_shape]
if preprocess:
# add preprocess block
if preprocess is True:
preprocess = _DefaultPreprocess()
else:
if not isinstance(preprocess, HybridBlock):
raise TypeError("preprocess must be HybridBlock, given {}".format(type(preprocess)))
wrapper_block = nn.HybridSequential()
preprocess.initialize()
wrapper_block.add(preprocess)
wrapper_block.add(block)
else:
wrapper_block = block
# try different data_shape if possible, until one fits the network
for dshape in data_shapes:
h, w, c = dshape
        if layout == 'HWC':
            x = mx.nd.zeros((1, h, w, c), ctx=ctx)
        elif layout == 'CHW':
            x = mx.nd.zeros((1, c, h, w), ctx=ctx)
        else:
            raise ValueError("Unsupported layout: {}. Use 'HWC' or 'CHW'.".format(layout))
# hybridize and forward once
wrapper_block.hybridize()
last_exception = None
try:
wrapper_block(x)
wrapper_block.export(path, epoch)
break
except MXNetError as e:
last_exception = e
if last_exception is not None:
raise RuntimeError(str(last_exception).splitlines()[0])
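# --- Hypothetical usage sketch (not part of the original module) ---
# Exporting a pre-trained GluonCV classifier with the default preprocess
# block.  The model name and output prefix are assumptions for illustration.
#
# from gluoncv import model_zoo
# net = model_zoo.get_model('resnet18_v1', pretrained=True)
# export_block('resnet18_v1', net, data_shape=(224, 224, 3), preprocess=True,
#              layout='HWC')
# # -> writes resnet18_v1-symbol.json and resnet18_v1-0000.params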
| 38.034483
| 100
| 0.618994
|
dbb8d363a3f18dabc2ed5ebbad4e82e02a08505c
| 703
|
py
|
Python
|
setup.py
|
Danny-Dasilva/TPUCameraManager
|
e9fff44bb23e913c7e9178dce140dfac438bb382
|
[
"MIT"
] | null | null | null |
setup.py
|
Danny-Dasilva/TPUCameraManager
|
e9fff44bb23e913c7e9178dce140dfac438bb382
|
[
"MIT"
] | null | null | null |
setup.py
|
Danny-Dasilva/TPUCameraManager
|
e9fff44bb23e913c7e9178dce140dfac438bb382
|
[
"MIT"
] | 1
|
2019-12-18T01:56:57.000Z
|
2019-12-18T01:56:57.000Z
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="TPUCameraManager", # Replace with your own username
version="0.1.1",
author="Devin Willis",
author_email="gatordevin@gmail.com",
description="A package for camera streams on the google coral",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
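# --- Hypothetical usage note (not part of the original file) ---
# With this setup.py in place, the package can be built and installed with a
# standard setuptools/pip workflow, e.g.:
#
#   pip install .                      # install from the repository root
#   python setup.py sdist bdist_wheel  # build distributable archives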
| 31.954545
| 67
| 0.677098
|
a3335724d841a69ea13920a1a223d81132bd526b
| 1,106
|
py
|
Python
|
notebooks/legacy/TPOT/tpot_pipeline_generations-3_popsize-20_cvfolds-3.py
|
jrbourbeau/cr-composition
|
e9efb4b713492aaf544b5dd8bb67280d4f108056
|
[
"MIT"
] | null | null | null |
notebooks/legacy/TPOT/tpot_pipeline_generations-3_popsize-20_cvfolds-3.py
|
jrbourbeau/cr-composition
|
e9efb4b713492aaf544b5dd8bb67280d4f108056
|
[
"MIT"
] | 7
|
2017-08-29T16:20:04.000Z
|
2018-06-12T16:58:36.000Z
|
notebooks/legacy/TPOT/tpot_pipeline_generations-3_popsize-20_cvfolds-3.py
|
jrbourbeau/cr-composition
|
e9efb4b713492aaf544b5dd8bb67280d4f108056
|
[
"MIT"
] | 1
|
2018-04-03T20:56:40.000Z
|
2018-04-03T20:56:40.000Z
|
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import FunctionTransformer
# NOTE: Make sure that the class is labeled 'class' in the data file
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)
features = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)
training_features, testing_features, training_classes, testing_classes = \
train_test_split(features, tpot_data['class'], random_state=42)
exported_pipeline = make_pipeline(
make_union(VotingClassifier([("est", BernoulliNB(alpha=0.16, binarize=0.19, fit_prior=True))]), FunctionTransformer(lambda X: X)),
GradientBoostingClassifier(learning_rate=0.47, max_features=0.47, n_estimators=500)
)
exported_pipeline.fit(training_features, training_classes)
results = exported_pipeline.predict(testing_features)
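# --- Hypothetical follow-up (not part of the TPOT export) ---
# A quick check of the exported pipeline on the held-out split; the choice of
# accuracy as the metric is an assumption for illustration.
from sklearn.metrics import accuracy_score
print("Test accuracy:", accuracy_score(testing_classes, results))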
| 50.272727
| 134
| 0.814647
|
36517e7053ba4d56507b60e527fe195b489cdf30
| 1,611
|
py
|
Python
|
von-x-agent/src/gunicorn_config.py
|
ianco/von-agent-template
|
eaca58314cfc9fa2e157ed72acc09bd2beb23006
|
[
"Apache-2.0"
] | 7
|
2019-03-06T20:11:00.000Z
|
2021-09-04T13:58:10.000Z
|
von-x-agent/src/gunicorn_config.py
|
ianco/von-agent-template
|
eaca58314cfc9fa2e157ed72acc09bd2beb23006
|
[
"Apache-2.0"
] | 47
|
2018-07-21T22:39:11.000Z
|
2022-03-02T13:08:36.000Z
|
von-x-agent/src/gunicorn_config.py
|
ianco/von-agent-template
|
eaca58314cfc9fa2e157ed72acc09bd2beb23006
|
[
"Apache-2.0"
] | 20
|
2018-11-09T18:07:56.000Z
|
2021-02-24T12:52:35.000Z
|
#
# Copyright 2017-2018 Government of Canada
# Public Services and Procurement Canada - buyandsell.gc.ca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=invalid-name
import os
capture_output = True
daemon = False
enable_stdio_inheritance = True
preload_app = True
workers = 5
worker_class = 'aiohttp.GunicornWebWorker'
worker_connections = 60
timeout = 60
backlog = 100
keepalive = 2
proc_name = None
errorlog = '-'
loglevel = 'debug'
pythonpath = '.'
accesslog = '-'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
def on_starting(server):
server.log.debug('Importing von-x services: pid %s', os.getpid())
# import the shared manager instance before any processes are forked
# this is necessary for the pipes and locks to be inherited
from permitify.common import MANAGER
server.service_mgr = MANAGER
def when_ready(server):
server.log.debug('Starting von-x services: pid %s', os.getpid())
server.service_mgr.start_process()
def on_exit(server):
server.log.debug('Shutting down von-x services')
server.service_mgr.stop()
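# --- Hypothetical usage note (not part of the original config) ---
# Launching the service with this configuration file.  The application module
# below is a placeholder; the real entry point is defined elsewhere in the
# project and must expose an aiohttp application, since the worker class is
# aiohttp.GunicornWebWorker.
#
#   gunicorn -c src/gunicorn_config.py myapp:app
#
# The hooks above then import the shared MANAGER before the workers fork and
# start/stop the von-x services around the master lifecycle.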
| 30.396226
| 81
| 0.733085
|
f1560e72f3cd6a5b82ed2d39e0cddf41fab7777c
| 9,026
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_securityconsole_package_cancel_install.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_securityconsole_package_cancel_install.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_securityconsole_package_cancel_install.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_securityconsole_package_cancel_install
short_description: Cancel policy install and clear preview cache.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
securityconsole_package_cancel_install:
description: the top level parameters set
required: false
type: dict
suboptions:
adom:
type: str
description: 'Source ADOM name.'
'''
EXAMPLES = '''
---
- name: INSTALL PREVIEW - POLICY PACKAGE
hosts: fmg
connection: httpapi
collections: fortinet.fortimanager
vars:
adom: demo
ppkg: ppkg_hubs
device: fgt_00_1
tasks:
- name: Install for policy package {{ adom }}/{{ ppkg }} [preview mode]
fmgr_securityconsole_install_package:
securityconsole_install_package:
adom: "{{ adom }}"
flags:
- preview
pkg: "{{ ppkg }}"
scope:
- name: "{{ device }}"
vdom: root
register: r
- name: Poll the task
fmgr_fact:
facts:
selector: 'task_task'
params:
task: '{{ r.meta.response_data.task }}'
register: taskinfo
until: taskinfo.meta.response_data.percent == 100
retries: 30
delay: 5
- name: Trigger the preview report generation for policy package {{ adom }}/{{ ppkg }}
fmgr_securityconsole_install_preview:
securityconsole_install_preview:
adom: "{{ adom }}"
device: "{{ device }}"
flags:
- json
vdoms: root
register: r
- name: Poll the task
fmgr_fact:
facts:
selector: 'task_task'
params:
task: '{{ r.meta.response_data.task }}'
register: taskinfo
until: taskinfo.meta.response_data.percent == 100
retries: 30
delay: 5
- name: Get the preview report for policy package {{ adom }}/{{ ppkg }}
fmgr_securityconsole_preview_result:
securityconsole_preview_result:
adom: "{{ adom }}"
device: "{{ device }}"
register: r
- name: Cancel install task for policy package {{ adom }}/{{ ppkg }}
fmgr_securityconsole_package_cancel_install:
securityconsole_package_cancel_install:
adom: "{{ adom }}"
- name: Show preview report
debug:
msg: "{{ r }}"
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/securityconsole/package/cancel/install'
]
perobject_jrpc_urls = [
'/securityconsole/package/cancel/install/{install}'
]
url_params = []
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'securityconsole_package_cancel_install': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'adom': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'securityconsole_package_cancel_install'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, None, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_exec(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 32.941606
| 153
| 0.597275
|
680f7a26d87694a029a39f7a3e0a8a6061decc7d
| 6,744
|
py
|
Python
|
ground/core/geometries.py
|
lycantropos/ground
|
ef6f54b8cb555af8d9202d621cac57a892ecb78d
|
[
"MIT"
] | 4
|
2021-05-15T19:15:56.000Z
|
2021-11-30T06:19:47.000Z
|
ground/core/geometries.py
|
lycantropos/ground
|
ef6f54b8cb555af8d9202d621cac57a892ecb78d
|
[
"MIT"
] | null | null | null |
ground/core/geometries.py
|
lycantropos/ground
|
ef6f54b8cb555af8d9202d621cac57a892ecb78d
|
[
"MIT"
] | null | null | null |
from operator import eq
from typing import (Sequence,
TypeVar)
from reprit.base import generate_repr
from . import hints
class Point:
__slots__ = '_x', '_y'
def __init__(self, x: hints.Scalar, y: hints.Scalar) -> None:
self._x, self._y = x, y
@property
def x(self) -> hints.Scalar:
return self._x
@property
def y(self) -> hints.Scalar:
return self._y
def __eq__(self, other: 'Point') -> bool:
return (self.x == other.x and self.y == other.y
if isinstance(other, Point)
else NotImplemented)
def __hash__(self) -> int:
return hash((self.x, self.y))
def __le__(self, other: 'Point') -> bool:
return (self.x < other.x or self.x == other.x and self.y <= other.y
if isinstance(other, Point)
else NotImplemented)
def __lt__(self, other: 'Point') -> bool:
return (self.x < other.x or self.x == other.x and self.y < other.y
if isinstance(other, Point)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Empty:
__slots__ = ()
def __init__(self) -> None:
pass
def __eq__(self, other: 'Empty'):
return isinstance(other, Empty) or NotImplemented
__repr__ = generate_repr(__init__)
class Multipoint:
__slots__ = '_points',
def __init__(self, points: Sequence[hints.Point]) -> None:
self._points = points
@property
def points(self) -> Sequence[hints.Point]:
return self._points
def __eq__(self, other: 'Multipoint') -> bool:
return (are_sequences_equivalent(self.points, other.points)
if isinstance(other, Multipoint)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Segment:
__slots__ = '_start', '_end'
def __init__(self, start: hints.Point, end: hints.Point) -> None:
self._start, self._end = start, end
@property
def start(self) -> hints.Point:
return self._start
@property
def end(self) -> hints.Point:
return self._end
def __eq__(self, other: 'Segment') -> bool:
return (self.start == other.start and self.end == other.end
or self.start == other.end and self.end == other.start
if isinstance(other, Segment)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Multisegment:
__slots__ = '_segments',
def __init__(self, segments: Sequence[hints.Segment]) -> None:
self._segments = segments
@property
def segments(self) -> Sequence[hints.Segment]:
return self._segments
def __eq__(self, other: 'Multisegment') -> bool:
return (are_sequences_equivalent(self.segments, other.segments)
if isinstance(other, Multisegment)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Contour:
__slots__ = '_vertices',
    def __init__(self, vertices: Sequence[hints.Point]) -> None:
self._vertices = vertices
@property
    def vertices(self) -> Sequence[hints.Point]:
return self._vertices
def __eq__(self, other: 'Contour') -> bool:
return (are_sequences_equivalent(self.vertices, other.vertices)
if isinstance(other, Contour)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Box:
__slots__ = '_min_x', '_max_x', '_min_y', '_max_y'
def __init__(self,
min_x: hints.Scalar,
max_x: hints.Scalar,
min_y: hints.Scalar,
max_y: hints.Scalar) -> None:
self._min_x, self._max_x, self._min_y, self._max_y = (min_x, max_x,
min_y, max_y)
@property
def max_x(self) -> hints.Scalar:
return self._max_x
@property
def max_y(self) -> hints.Scalar:
return self._max_y
@property
def min_x(self) -> hints.Scalar:
return self._min_x
@property
def min_y(self) -> hints.Scalar:
return self._min_y
def __eq__(self, other: 'Box') -> bool:
return (self.min_x == other.min_x and self.max_x == other.max_x
and self.min_y == other.min_y and self.max_y == other.max_y
if isinstance(other, Box)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Polygon:
__slots__ = '_border', '_holes'
def __init__(self, border: hints.Contour, holes: Sequence[hints.Contour]
) -> None:
self._border, self._holes = border, holes
@property
def border(self) -> hints.Contour:
return self._border
@property
def holes(self) -> Sequence[hints.Contour]:
return self._holes
def __eq__(self, other: 'Polygon') -> bool:
return (self.border == other.border
and are_sequences_equivalent(self.holes, other.holes)
if isinstance(other, Polygon)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Multipolygon:
__slots__ = '_polygons',
def __init__(self, polygons: Sequence[hints.Polygon]) -> None:
self._polygons = polygons
@property
def polygons(self) -> Sequence[hints.Polygon]:
return self._polygons
def __eq__(self, other: 'Multipolygon') -> bool:
return (are_sequences_equivalent(self.polygons, other.polygons)
if isinstance(other, Multipolygon)
else NotImplemented)
__repr__ = generate_repr(__init__)
class Mix:
__slots__ = '_discrete', '_linear', '_shaped'
def __init__(self,
discrete: hints.Maybe[hints.Multipoint],
linear: hints.Maybe[hints.Linear],
shaped: hints.Maybe[hints.Shaped]) -> None:
self._discrete, self._linear, self._shaped = discrete, linear, shaped
@property
def discrete(self) -> hints.Maybe[hints.Multipoint]:
return self._discrete
@property
def linear(self) -> hints.Maybe[hints.Linear]:
return self._linear
@property
def shaped(self) -> hints.Maybe[hints.Shaped]:
return self._shaped
def __eq__(self, other: 'Mix') -> bool:
return (self.discrete == other.discrete
and self.linear == other.linear
and self.shaped == other.shaped
if isinstance(other, Mix)
else NotImplemented)
__repr__ = generate_repr(__init__)
_T = TypeVar('_T')
def are_sequences_equivalent(left: Sequence[_T], right: Sequence[_T]) -> bool:
return len(left) == len(right) and all(map(eq, left, right))
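# --- Hypothetical usage sketch (not part of the original module) ---
# Constructing a square polygon from the primitives above and exercising the
# structural equality they implement.  Plain ints are used as scalars, which
# is an assumption; any numeric type satisfying hints.Scalar would do.
if __name__ == '__main__':
    square_border = Contour([Point(0, 0), Point(4, 0), Point(4, 4), Point(0, 4)])
    square = Polygon(square_border, holes=[])
    same_square = Polygon(Contour([Point(0, 0), Point(4, 0),
                                   Point(4, 4), Point(0, 4)]),
                          holes=[])
    assert square == same_square
    assert Box(0, 4, 0, 4) == Box(0, 4, 0, 4)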
| 27.193548
| 78
| 0.60261
|
28fa97566d173d1bb300b118d8c057ba6f97c4e6
| 5,482
|
py
|
Python
|
src/opendms/facelandmarksdetector.py
|
cledouarec/OpenDMS
|
cbbb40aa5d37ce10e2c3cd345cff3796b0bffdcb
|
[
"Apache-2.0"
] | null | null | null |
src/opendms/facelandmarksdetector.py
|
cledouarec/OpenDMS
|
cbbb40aa5d37ce10e2c3cd345cff3796b0bffdcb
|
[
"Apache-2.0"
] | null | null | null |
src/opendms/facelandmarksdetector.py
|
cledouarec/OpenDMS
|
cbbb40aa5d37ce10e2c3cd345cff3796b0bffdcb
|
[
"Apache-2.0"
] | null | null | null |
#! python3
"""
All functions related to face landmarks detection
"""
import logging
from typing import List
import cv2
import dlib
import numpy as np
import pkg_resources
class FaceLandmarksDetector:
"""
Base class for face landmarks detector.
"""
def run(self, image, faces: List) -> List:
"""
Find landmarks on given `faces` detected on `image`.
:param image: Input image
:param faces: List of faces
:return: List of faces detected
"""
class DLibFaceLandmarksDetector(FaceLandmarksDetector):
"""
This class is used to find landmarks on face based on DLib classifier.
"""
def __init__(self, dnn_model: str = None):
"""
Constructs DLib landmarks detector.
"""
logging.info("Create face landmarks detector based on DLib classifier")
if dnn_model is None:
dnn_model = pkg_resources.resource_filename(
__name__, "data/shape_predictor_68_face_landmarks.dat"
)
#: DLib landmarks classifier
self.__detector = dlib.shape_predictor(dnn_model)
def run(self, image, faces: List) -> List:
"""
Find landmarks on given `faces` detected on `image`.
:param image: Input image
:param faces: List of faces
:return: List of faces detected
"""
# Preprocess image
image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
all_faces_landmarks_list = []
for face in faces:
dlib_rect = dlib.rectangle(
left=face[0][0],
top=face[0][1],
right=face[1][0],
bottom=face[1][1],
)
# Run classifier
landmarks = self.__detector(image_bw, dlib_rect)
landmarks_list = []
for n in range(0, 68):
x = landmarks.part(n).x
y = landmarks.part(n).y
landmarks_list.append((x, y))
all_faces_landmarks_list.append(landmarks_list)
return all_faces_landmarks_list
class LBFFaceLandmarksDetector(FaceLandmarksDetector):
"""
This class is used to find landmarks on face based on LBF classifier.
"""
def __init__(self, lbf_model: str = None):
"""
Constructs LBF landmarks detector.
"""
logging.info("Create face landmarks detector based on LBF classifier")
if lbf_model is None:
lbf_model = pkg_resources.resource_filename(
__name__, "data/lbfmodel.yaml"
)
#: LBF landmarks classifier
self.__detector = cv2.face.createFacemarkLBF()
self.__detector.loadModel(lbf_model)
def run(self, image, faces: List) -> List:
"""
Find landmarks on given `faces` detected on `image`.
:param image: Input image
:param faces: List of faces
:return: List of faces detected
"""
# Preprocess image
image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces_converted = []
for face in faces:
faces_converted.append(
[
face[0][0], # - 100,
face[0][1],
face[1][0] - face[0][0], # + 200,
face[1][1] - face[0][1],
]
)
# Run classifier
_, landmarks = self.__detector.fit(
image_bw, np.array([faces_converted])
)
# Normalize results
for landmark in landmarks:
for x, y in landmark[0]:
                # draw each landmark on the image in blue (BGR), thickness 1
                cv2.circle(image, (int(x), int(y)), 1, (255, 0, 0), 1)
return []
class KazemiFaceLandmarksDetector(FaceLandmarksDetector):
"""
This class is used to find landmarks on face based on Kazemi classifier.
"""
def __init__(self, lbf_model: str = None):
"""
Constructs Kazemi landmarks detector.
"""
logging.info(
"Create face landmarks detector based on Kazemi classifier"
)
if lbf_model is None:
lbf_model = pkg_resources.resource_filename(
__name__, "data/lbfmodel.yaml"
)
#: Kazemi landmarks classifier
self.__detector = cv2.face.createFacemarkKazemi()
def run(self, image, faces: List) -> List:
"""
Find landmarks on given `faces` detected on `image`.
:param image: Input image
:param faces: List of faces
:return: List of faces detected
"""
# Preprocess image
image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces_converted = []
for face in faces:
faces_converted.append(
[
face[0][0] - 100,
face[0][1],
face[1][0] - face[0][0] + 200,
face[1][1] - face[0][1],
]
)
# Run classifier
_, landmarks = self.__detector.fit(
image_bw, np.array([faces_converted])
)
# Normalize results
for landmark in landmarks:
for x, y in landmark[0]:
                # draw each landmark on the image in blue (BGR), thickness 1
                cv2.circle(image, (x, y), 1, (255, 0, 0), 1)
return []
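# --- Hypothetical usage sketch (not part of the original module) ---
# Running the dlib-based detector on a single image.  Face boxes are assumed
# to come from a separate face detector elsewhere in the project; the
# hard-coded ((left, top), (right, bottom)) box below stands in for it,
# matching how run() reads face[0] and face[1].
#
# detector = DLibFaceLandmarksDetector()
# image = cv2.imread("driver.jpg")                 # placeholder image path
# faces = [((100, 80), (300, 280))]                # placeholder face box
# landmarks_per_face = detector.run(image, faces)
# for x, y in landmarks_per_face[0]:
#     cv2.circle(image, (x, y), 1, (0, 255, 0), 1)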
| 28.257732
| 79
| 0.547975
|
1c82aaf2f93ec5e3f4e804000c74bfe476422ec0
| 7,171
|
py
|
Python
|
p1_navigation/dqn_agent.py
|
brianx0215/deep-reinforcement-learning
|
606ccb5eb1b302514567e33dcfdb372942671af7
|
[
"MIT"
] | null | null | null |
p1_navigation/dqn_agent.py
|
brianx0215/deep-reinforcement-learning
|
606ccb5eb1b302514567e33dcfdb372942671af7
|
[
"MIT"
] | null | null | null |
p1_navigation/dqn_agent.py
|
brianx0215/deep-reinforcement-learning
|
606ccb5eb1b302514567e33dcfdb372942671af7
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
from collections import namedtuple, deque
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # interpolation parameter for soft update of target parameters
LR = 1e-3 # learning rate
UPDATE_EVERY = 4 # how often to update the network
use_cuda = torch.cuda.is_available()
class DQN(nn.Module):
#Customized DQN class for Udacity deep reinforcement learning project 1
def __init__(self, state_size, action_size, seed):
super(DQN, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, action_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def conv(in_channels, out_channels, kernel_size, stride = 1, padding = 1, batch_norm = True):
layers = []
conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias = False)
layers.append(conv_layer)
if batch_norm:
layers.append(nn.BatchNorm2d(out_channels))
return nn.Sequential(*layers)
class CNNDQN(nn.Module):
#Customized CNN and DQN class for Udacity deep reinforcement learning project 1
#The class is referenced from the projects in Udacity deep learning course.
def __init__(self, state_size, action_size, seed):
super(CNNDQN, self).__init__()
self.seed = torch.manual_seed(seed)
self.conv1 = conv(3, 4, 3, batch_norm = False)
self.conv2 = conv(4, 8, 3)
self.conv3 = conv(8, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(800, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, action_size)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(-1, 800)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class Agent():
# The agent interacts with and learns from the banana environment.
def __init__(self, state_size, action_size, visual_input, seed):
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
        # local (online) and target networks for fixed Q-targets
if visual_input:
self.dqn_local = CNNDQN(state_size, action_size, seed)
self.dqn_target = CNNDQN(state_size, action_size, seed)
else:
self.dqn_local = DQN(state_size, action_size, seed)
self.dqn_target = DQN(state_size, action_size, seed)
self.optimizer = optim.Adam(self.dqn_local.parameters(), lr=LR)
if use_cuda:
            self.dqn_local, self.dqn_target = self.dqn_local.cuda(), self.dqn_target.cuda()
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
self.step_counter = 0
def step(self, state, action, reward, next_state, done):
# Record the experience, update the network when running enough step.
self.memory.add(state, action, reward, next_state, done)
self.step_counter = (self.step_counter + 1) % UPDATE_EVERY
if self.step_counter == 0:
if len(self.memory) >= BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.0):
#Returns action for given state as per current policy.
state = torch.from_numpy(state).float().unsqueeze(0)
if use_cuda:
state = state.cuda()
self.dqn_local.eval()
with torch.no_grad():
action_values = self.dqn_local(state)
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
#Update value parameters using given batch of experience tuples.
states, actions, rewards, next_states, dones = experiences
self.dqn_local.train()
# Get max predicted Q values (for next states) from target model
Q_targets_next = self.dqn_target(next_states).detach().max(1)[0]
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from the local model for the actions taken
        # shape: (batch_size, action_size) gathered down to (batch_size,)
        Q_expected = self.dqn_local(states).gather(1, actions.unsqueeze(1)).squeeze(1)
loss = F.mse_loss(Q_expected, Q_targets)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
#update target network
self.soft_update(self.dqn_local, self.dqn_target, TAU)
def soft_update(self, local_model, target_model, tau):
#Soft update model parameters.
#θ_target = τ * θ_local + (1 - τ) * θ_target
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
def save(self, path):
torch.save(self.dqn_local.state_dict(), path)
def load(self, path):
self.dqn_local.load_state_dict(torch.load(path))
self.dqn_target = self.dqn_local
class ReplayBuffer:
#Fixed-size buffer to store experience tuples.
def __init__(self, action_size, buffer_size, batch_size, seed):
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
#Randomly sample a batch of experiences from memory.
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.FloatTensor([e.state for e in experiences if e is not None])
actions = torch.LongTensor([e.action for e in experiences if e is not None])
rewards = torch.FloatTensor([e.reward for e in experiences if e is not None])
next_states = torch.FloatTensor([e.next_state for e in experiences if e is not None])
dones = torch.FloatTensor([e.done for e in experiences if e is not None])
if use_cuda:
states, actions, rewards, next_states, dones = states.cuda(), actions.cuda(), rewards.cuda(), next_states.cuda(), dones.cuda()
return (states, actions, rewards, next_states, dones)
def __len__(self):
return len(self.memory)
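# --- Illustrative usage sketch (an addition, not part of the original project code). ---
# Minimal smoke test that feeds random 37-dimensional states (the ray-based banana
# observation size is assumed here) to the agent instead of the Unity environment.
# It relies on the hyperparameters (BUFFER_SIZE, BATCH_SIZE, UPDATE_EVERY, GAMMA,
# TAU, LR, use_cuda) defined earlier in this module.
if __name__ == "__main__":
    agent = Agent(state_size=37, action_size=4, visual_input=False, seed=0)
    state = np.random.rand(37).astype(np.float32)
    for t in range(BATCH_SIZE + UPDATE_EVERY):
        action = agent.act(state, eps=0.1)
        next_state = np.random.rand(37).astype(np.float32)
        reward = float(np.random.rand())
        done = (t == BATCH_SIZE + UPDATE_EVERY - 1)
        agent.step(state, action, reward, next_state, done)
        state = next_state
    print("Smoke test done; replay buffer holds {} experiences.".format(len(agent.memory)))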
| 38.143617
| 138
| 0.633803
|
30cc5f16658a76ab7ae64c0a07d031357d1c8c4e
| 13,900
|
py
|
Python
|
mydata_did/v1_0/utils/diddoc.py
|
decentralised-dataexchange/acapy-mydata-did-protocol
|
c84d86d12689cfb1a29d43734ee27a03ccdf8d77
|
[
"Apache-2.0"
] | 1
|
2022-02-10T17:51:27.000Z
|
2022-02-10T17:51:27.000Z
|
mydata_did/v1_0/utils/diddoc.py
|
decentralised-dataexchange/acapy-mydata-did-protocol
|
c84d86d12689cfb1a29d43734ee27a03ccdf8d77
|
[
"Apache-2.0"
] | 12
|
2021-09-19T14:27:56.000Z
|
2022-03-28T13:31:58.000Z
|
mydata_did/v1_0/utils/diddoc.py
|
decentralised-dataexchange/acapy-mydata-did-protocol
|
c84d86d12689cfb1a29d43734ee27a03ccdf8d77
|
[
"Apache-2.0"
] | 1
|
2022-01-03T14:09:05.000Z
|
2022-01-03T14:09:05.000Z
|
"""
DID Document classes.
Copyright 2017-2019 Government of Canada
Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
from typing import List, Sequence, Union
if __name__ == "__main__":
from mydata_did.v1_0.utils.verification_method import PublicKey, PublicKeyType
from mydata_did.v1_0.utils.service import Service
from mydata_did.v1_0.utils.util import canon_did, canon_ref, ok_did, resource, derive_did_type
else:
from .verification_method import PublicKey, PublicKeyType
from .service import Service
from .util import canon_did, canon_ref, ok_did, resource, derive_did_type
LOGGER = logging.getLogger(__name__)
class DIDDoc:
"""
DID document, grouping a DID with verification keys and services.
    Retains DIDs as raw values (oriented toward indy-facing operations),
everything else as URIs (oriented toward W3C-facing operations).
"""
CONTEXT = "https://w3id.org/did/v1"
def __init__(self, did: str = None) -> None:
"""
Initialize the DIDDoc instance.
Retain DID ('id' in DIDDoc context); initialize verification keys
and services to empty lists.
Args:
did: DID for current DIDdoc
Raises:
ValueError: for bad input DID.
"""
# allow specification post-hoc
self._did = canon_did(did) if did else None
self._did_type = derive_did_type(did)
self._pubkey = {}
self._service = {}
@property
def did_type(self) -> str:
return self._did_type
@property
def did(self) -> str:
"""Accessor for DID."""
return self._did
@did.setter
def did(self, value: str) -> None:
"""
Set DID ('id' in DIDDoc context).
Args:
value: DID
Raises:
ValueError: for bad input DID.
"""
self._did = canon_did(value) if value else None
self._did_type = derive_did_type(value)
@property
def pubkey(self) -> dict:
"""Accessor for public keys by identifier."""
return self._pubkey
@property
def authnkey(self) -> dict:
"""Accessor for public keys marked as authentication keys, by identifier."""
return {k: self._pubkey[k] for k in self._pubkey if self._pubkey[k].authn}
@property
def service(self) -> dict:
"""Accessor for services by identifier."""
return self._service
def set(self, item: Union[Service, PublicKey]) -> "DIDDoc":
"""
Add or replace service or public key; return current DIDDoc.
Raises:
ValueError: if input item is neither service nor public key.
Args:
item: service or public key to set
Returns: the current DIDDoc
"""
if isinstance(item, Service):
self.service[item.id] = item
elif isinstance(item, PublicKey):
self.pubkey[item.id] = item
else:
raise ValueError(
"Cannot add item {} to DIDDoc on DID {}".format(item, self.did)
            )
        return self
    def serialize(self) -> dict:
"""
Dump current object to a JSON-compatible dictionary.
Returns:
dict representation of current DIDDoc
"""
return {
"@context": DIDDoc.CONTEXT,
"id": canon_ref(self.did, self.did, did_type=self.did_type),
"verificationMethod": [pubkey.to_dict() for pubkey in self.pubkey.values()],
"authentication": [
{
"type": pubkey.type.authn_type,
"publicKey": canon_ref(self.did, pubkey.id),
}
for pubkey in self.pubkey.values()
if pubkey.authn
],
"service": [service.to_dict() for service in self.service.values()],
}
def to_json(self) -> str:
"""
Dump current object as json (JSON-LD).
Returns:
json representation of current DIDDoc
"""
return json.dumps(self.serialize())
def add_service_pubkeys(
self, service: dict, tags: Union[Sequence[str], str]
) -> List[PublicKey]:
"""
Add public keys specified in service. Return public keys so discovered.
Args:
service: service from DID document
tags: potential tags marking public keys of type of interest
(the standard is still coalescing)
Raises:
ValueError: for public key reference not present in DID document.
Returns: list of public keys from the document service specification
"""
rv = []
for tag in [tags] if isinstance(tags, str) else list(tags):
for svc_key in service.get(tag, {}):
canon_key = canon_ref(self.did, svc_key)
pubkey = None
if "#" in svc_key:
if canon_key in self.pubkey:
pubkey = self.pubkey[canon_key]
else: # service key refers to another DID doc
LOGGER.debug(
"DID document %s has no public key %s", self.did, svc_key
)
raise ValueError(
"DID document {} has no public key {}".format(
self.did, svc_key
)
)
else:
for existing_pubkey in self.pubkey.values():
if existing_pubkey.value == svc_key:
pubkey = existing_pubkey
break
else:
pubkey = PublicKey(
self.did,
# industrial-grade uniqueness
ident=svc_key[-9:-1],
value=svc_key,
)
self._pubkey[pubkey.id] = pubkey
if (
pubkey and pubkey not in rv
): # perverse case: could specify same key multiple ways; append once
rv.append(pubkey)
return rv
@classmethod
def deserialize(cls, did_doc: dict) -> "DIDDoc":
"""
Construct DIDDoc object from dict representation.
Args:
did_doc: DIDDoc dict representation
Raises:
ValueError: for bad DID or missing mandatory item.
        Returns: DIDDoc from input dict representation
"""
rv = None
if "id" in did_doc:
rv = DIDDoc(did_doc["id"])
else:
# heuristic: get DID to serve as DID document identifier from
# the first OK-looking public key
for section in ("verificationMethod", "authentication"):
if rv is None and section in did_doc:
for key_spec in did_doc[section]:
try:
pubkey_did = canon_did(
resource(key_spec.get("id", "")))
if ok_did(pubkey_did):
rv = DIDDoc(pubkey_did)
break
except ValueError: # no identifier here, move on to next
break
if rv is None:
LOGGER.debug("no identifier in DID document")
raise ValueError("No identifier in DID document")
for pubkey in did_doc.get(
"verificationMethod", {}
): # include all public keys, authentication pubkeys by reference
pubkey_type = PublicKeyType.get(pubkey["type"])
authn = any(
canon_ref(rv.did, ak.get("publicKey", ""))
== canon_ref(rv.did, pubkey["id"])
for ak in did_doc.get("authentication", {})
if isinstance(ak.get("publicKey", None), str)
)
key = PublicKey( # initialization canonicalizes id
rv.did,
pubkey["id"],
pubkey[pubkey_type.specifier],
pubkey_type,
canon_did(pubkey["controller"]),
authn,
)
rv.pubkey[key.id] = key
for akey in did_doc.get(
"authentication", {}
): # include embedded authentication keys
if "publicKey" not in akey: # not yet got it with public keys
pubkey_type = PublicKeyType.get(akey["type"])
key = PublicKey( # initialization canonicalized id
rv.did,
akey["id"],
akey[pubkey_type.specifier],
pubkey_type,
canon_did(akey["controller"]),
True,
)
rv.pubkey[key.id] = key
for service in did_doc.get("service", {}):
endpoint = service["serviceEndpoint"]
svc = Service( # initialization canonicalizes id
rv.did,
service.get(
"id",
canon_ref(
rv.did, "assigned-service-{}".format(
len(rv.service)), ";"
),
),
service["type"],
rv.add_service_pubkeys(service, "recipientKeys"),
rv.add_service_pubkeys(
service, ["mediatorKeys", "routingKeys"]),
canon_ref(rv.did, endpoint,
";") if ";" in endpoint else endpoint,
service.get("priority", None),
)
rv.service[svc.id] = svc
return rv
@classmethod
def from_json(cls, did_doc_json: str) -> "DIDDoc":
"""
Construct DIDDoc object from json representation.
Args:
did_doc_json: DIDDoc json representation
Returns: DIDDoc from input json
"""
return cls.deserialize(json.loads(did_doc_json))
def validate(self):
# FIXME : Code refactor
# check public key and authentication key is available and if so, a single match item
if self.pubkey and self.authnkey and len(self.pubkey.values()) == 1 and len(self.authnkey.values()) == 1:
# check if public key and authentication key match
if list(self.pubkey.keys())[0] == list(self.authnkey.keys())[0]:
# check if controller and did and publicKeyBase58 matches
# check if public key type is Ed25519VerificationKey2018
public_key: PublicKey = list(self.pubkey.values())[0]
if public_key.controller == self.did and public_key.type == PublicKeyType.ED25519_SIG_2018:
# Optional check, if service is available
if self.service:
                        if (
                            len(self.service.values()) == 1
                            and canon_ref(self.did, "didcomm", ";", did_type=self.did_type) == list(self.service.keys())[0]
                        ):
                            service: Service = list(self.service.values())[0]
                            if (
                                len(service.recip_keys) == 1
                                and service.recip_keys[0].type == public_key.type
                                and service.recip_keys[0].controller == public_key.controller
                                and service.type == "DIDComm"
                                and service.priority == 0
                                and service.did == self.did
                            ):
return True
else:
return True
return False
def __str__(self) -> str:
"""Return string representation for abbreviated display."""
return f"DIDDoc({self.did})"
def __repr__(self) -> str:
"""Format DIDDoc for logging."""
return f"<DIDDoc did={self.did}>"
if __name__ == "__main__":
import json
from mydata_did.v1_0.utils.diddoc import DIDDoc
diddoc_json = {
"@context": "https://w3id.org/did/v1",
"id": "did:mydata:0:z6MkfiSdYhnLnS6jfwSf2yS2CiwwjZGmFUFL5QbyL2Xu8z2E",
"verificationMethod": [
{
"id": "did:mydata:0:z6MkfiSdYhnLnS6jfwSf2yS2CiwwjZGmFUFL5QbyL2Xu8z2E#1",
"type": "Ed25519VerificationKey2018",
"controller": "did:mydata:0:z6MkfiSdYhnLnS6jfwSf2yS2CiwwjZGmFUFL5QbyL2Xu8z2E",
"publicKeyBase58": "z6MkfiSdYhnLnS6jfwSf2yS2CiwwjZGmFUFL5QbyL2Xu8z2E"
}
],
"authentication": [
{
"type": "Ed25519VerificationKey2018",
"publicKey": "did:mydata:0:z6MkfiSdYhnLnS6jfwSf2yS2CiwwjZGmFUFL5QbyL2Xu8z2E#1"
}
],
"service": [
{
"id": "did:mydata:0:z6MkfiSdYhnLnS6jfwSf2yS2CiwwjZGmFUFL5QbyL2Xu8z2E;didcomm",
"type": "DIDComm",
"priority": 0,
"recipientKeys": [
"z6MkfiSdYhnLnS6jfwSf2yS2CiwwjZGmFUFL5QbyL2Xu8z2E"
],
"serviceEndpoint": "https://ada-agent.example.com/service-x"
}
]
}
diddoc_str = json.dumps(diddoc_json)
diddoc = DIDDoc.from_json(diddoc_str)
if diddoc.validate():
print("\nValidation checks passed...")
else:
print("\nValidation checks failed...")
| 34.152334
| 256
| 0.542086
|
2f2c0a0792b07862781bbf652c6aff1b7a94fd76
| 3,354
|
py
|
Python
|
src/python/pants/backend/jvm/tasks/properties.py
|
dturner-tw/pants
|
3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/jvm/tasks/properties.py
|
dturner-tw/pants
|
3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/jvm/tasks/properties.py
|
dturner-tw/pants
|
3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f
|
[
"Apache-2.0"
] | 1
|
2019-06-10T17:24:34.000Z
|
2019-06-10T17:24:34.000Z
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
from collections import OrderedDict
import six
class Properties(object):
"""A Python reader for java.util.Properties formatted data.
Based on:
http://download.oracle.com/javase/6/docs/api/java/util/Properties.html#load(java.io.Reader)
Originally copied from:
https://github.com/twitter/commons/blob/master/src/python/twitter/common/config/properties.py
"""
@staticmethod
def load(data):
"""Loads properties from an open stream or the contents of a string.
:param (string | open stream) data: An open stream or a string.
:returns: A dict of parsed property data.
:rtype: dict
"""
if hasattr(data, 'read') and callable(data.read):
contents = data.read()
elif isinstance(data, six.string_types):
contents = data
else:
raise TypeError('Can only process data from a string or a readable object, given: %s' % data)
return Properties._parse(contents.splitlines())
# An unescaped '=' or ':' forms an explicit separator
_EXPLICIT_KV_SEP = re.compile(r'(?<!\\)[=:]')
@staticmethod
def _parse(lines):
def coalesce_lines():
line_iter = iter(lines)
try:
buffer = ''
while True:
line = next(line_iter)
if line.strip().endswith('\\'):
# Continuation.
buffer += line.strip()[:-1]
else:
if buffer:
# Continuation join, preserve left hand ws (could be a kv separator)
buffer += line.rstrip()
else:
# Plain old line
buffer = line.strip()
try:
yield buffer
finally:
buffer = ''
except StopIteration:
pass
def normalize(atom):
return re.sub(r'\\([:=\s])', r'\1', atom.strip())
def parse_line(line):
if line and not (line.startswith('#') or line.startswith('!')):
match = Properties._EXPLICIT_KV_SEP.search(line)
if match:
return normalize(line[:match.start()]), normalize(line[match.end():])
else:
space_sep = line.find(' ')
if space_sep == -1:
return normalize(line), ''
else:
return normalize(line[:space_sep]), normalize(line[space_sep:])
props = OrderedDict()
for line in coalesce_lines():
kv_pair = parse_line(line)
if kv_pair:
key, value = kv_pair
props[key] = value
return props
@staticmethod
def dump(props, output):
"""Dumps a dict of properties to the specified open stream or file path."""
def escape(token):
return re.sub(r'([=:\s])', r'\\\1', token)
def write(out):
for k, v in props.items():
out.write('%s=%s\n' % (escape(str(k)), escape(str(v))))
if hasattr(output, 'write') and callable(output.write):
write(output)
elif isinstance(output, six.string_types):
      with open(output, 'w') as out:
write(out)
else:
raise TypeError('Can only dump data to a path or a writable object, given: %s' % output)
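# --- Illustrative usage sketch (an addition, not part of the original module). ---
# Parses a small java.util.Properties-style snippet and dumps it back out,
# exercising both explicit ('=', ':') and whitespace key/value separators.
if __name__ == '__main__':
  import io

  sample = '# demo\nname=pants\nsrc.dir : src/python\nlabel value with spaces\n'
  parsed = Properties.load(sample)
  for key, value in parsed.items():
    print('%s -> %s' % (key, value))

  out = io.StringIO()
  Properties.dump(parsed, out)
  print(out.getvalue())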
| 29.946429
| 99
| 0.606738
|