Dataset schema (29 columns):
hexsha: string (length 40) | size: int64 (4–1.02M) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (4–209) | max_stars_repo_name: string (5–121) | max_stars_repo_head_hexsha: string (length 40) | max_stars_repo_licenses: list (1–10) | max_stars_count: int64 (1–191k, nullable) | max_stars_repo_stars_event_min_datetime: string (length 24, nullable) | max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (4–209) | max_issues_repo_name: string (5–121) | max_issues_repo_head_hexsha: string (length 40) | max_issues_repo_licenses: list (1–10) | max_issues_count: int64 (1–67k, nullable) | max_issues_repo_issues_event_min_datetime: string (length 24, nullable) | max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (4–209) | max_forks_repo_name: string (5–121) | max_forks_repo_head_hexsha: string (length 40) | max_forks_repo_licenses: list (1–10) | max_forks_count: int64 (1–105k, nullable) | max_forks_repo_forks_event_min_datetime: string (length 24, nullable) | max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (4–1.02M) | avg_line_length: float64 (1.07–66.1k) | max_line_length: int64 (4–266k) | alphanum_fraction: float64 (0.01–1)

hexsha: 7a647ff89aa7316a12058cb24cc43cff1b4dc0ec | size: 458 | ext: py | lang: Python
max_stars:  path: crossentropy_.py | repo: zhoufengfan/crossentropy | head: b53f53ba01ffe2acf3c35ba0fb7ab8e201a5ac14 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_issues: path: crossentropy_.py | repo: zhoufengfan/crossentropy | head: b53f53ba01ffe2acf3c35ba0fb7ab8e201a5ac14 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_forks:  path: crossentropy_.py | repo: zhoufengfan/crossentropy | head: b53f53ba01ffe2acf3c35ba0fb7ab8e201a5ac14 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
content:
import torch
import torch.nn.functional as F
if __name__ == '__main__':
t1 = torch.tensor([[1, 2]]).float()
t3 = torch.tensor([1])
t2 = F.cross_entropy(input=t1, target=t3, reduction="mean")
print("t2 is", t2)
t1_softmaxed = F.softmax(t1, dim=1)
minus_log_t1_softmaxed = -torch.log(t1_softmaxed)
# print("minus_log_t1_softmaxed is", minus_log_t1_softmaxed)
print("minus_log_t1_softmaxed[1] is", minus_log_t1_softmaxed[0][1])
avg_line_length: 35.230769 | max_line_length: 71 | alphanum_fraction: 0.69214

hexsha: 800ae062b123cbe5676f07ea4327e150b54a8b13 | size: 14,727 | ext: py | lang: Python
max_stars:  path: qiskit/quantum_info/states/densitymatrix.py | repo: EnriqueL8/qiskit-terra | head: 08b801f1f8598c4e44680b4a75c232ed92db0262 | licenses: ["Apache-2.0"] | count: 2 | event_min: 2019-06-28T19:58:42.000Z | event_max: 2019-07-26T05:04:02.000Z
max_issues: path: qiskit/quantum_info/states/densitymatrix.py | repo: EnriqueL8/qiskit-terra | head: 08b801f1f8598c4e44680b4a75c232ed92db0262 | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
max_forks:  path: qiskit/quantum_info/states/densitymatrix.py | repo: EnriqueL8/qiskit-terra | head: 08b801f1f8598c4e44680b4a75c232ed92db0262 | licenses: ["Apache-2.0"] | count: 1 | event_min: 2020-01-24T21:01:06.000Z | event_max: 2020-01-24T21:01:06.000Z
content:
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
DensityMatrix quantum state class.
"""
from numbers import Number
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.circuit.instruction import Instruction
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.states.quantum_state import QuantumState
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.predicates import is_hermitian_matrix
from qiskit.quantum_info.operators.predicates import is_positive_semidefinite_matrix
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit.quantum_info.operators.channel.superop import SuperOp
from qiskit.quantum_info.states.statevector import Statevector
from qiskit.quantum_info.states.counts import state_to_counts
class DensityMatrix(QuantumState):
"""DensityMatrix class"""
def __init__(self, data, dims=None):
"""Initialize a state object."""
if isinstance(data, Statevector):
# We convert a statevector into a density matrix by taking the projector
state_vec = data.data
mat = np.outer(state_vec, np.conjugate(state_vec))
if dims is None:
dims = data.dims()
elif hasattr(data, 'to_operator'):
# If the data object has a 'to_operator' attribute this is given
# higher preference than the 'to_matrix' method for initializing
# an Operator object.
data = data.to_operator()
mat = data.data
if dims is None:
dims = data.output_dims()
elif hasattr(data, 'to_matrix'):
# If no 'to_operator' attribute exists we next look for a
# 'to_matrix' attribute to a matrix that will be cast into
# a complex numpy matrix.
mat = np.array(data.to_matrix(), dtype=complex)
elif isinstance(data, (list, np.ndarray)):
# Finally we check if the input is a raw matrix in either a
# python list or numpy array format.
mat = np.array(data, dtype=complex)
else:
raise QiskitError("Invalid input data format for DensityMatrix")
# Convert statevector into a density matrix
if mat.ndim == 2 and mat.shape[1] == 1:
mat = np.reshape(mat, mat.shape[0])
if mat.ndim == 1:
mat = np.outer(mat, np.conj(mat))
# Determine input and output dimensions
if mat.ndim != 2 or mat.shape[0] != mat.shape[1]:
raise QiskitError(
"Invalid DensityMatrix input: not a square matrix.")
subsystem_dims = self._automatic_dims(dims, mat.shape[0])
super().__init__('DensityMatrix', mat, subsystem_dims)
def is_valid(self, atol=None, rtol=None):
"""Return True if trace 1 and positive semidefinite."""
if atol is None:
atol = self._atol
if rtol is None:
rtol = self._rtol
# Check trace == 1
if not np.allclose(self.trace(), 1, rtol=rtol, atol=atol):
return False
# Check Hermitian
if not is_hermitian_matrix(self.data, rtol=rtol, atol=atol):
return False
# Check positive semidefinite
return is_positive_semidefinite_matrix(self.data, rtol=rtol, atol=atol)
def to_operator(self):
"""Convert to Operator"""
dims = self.dims()
return Operator(self.data, input_dims=dims, output_dims=dims)
def conjugate(self):
"""Return the conjugate of the density matrix."""
return DensityMatrix(np.conj(self.data), dims=self.dims())
def trace(self):
"""Return the trace of the density matrix."""
return np.trace(self.data)
def purity(self):
"""Return the purity of the quantum state."""
        # For a density matrix the purity is Tr[rho . rho]. It equals 1 only
        # for a pure state; for a mixed (or not correctly normalized) matrix
        # it differs from 1.
return np.trace(np.dot(self.data, self.data))
def tensor(self, other):
"""Return the tensor product state self ⊗ other.
Args:
other (DensityMatrix): a quantum state object.
Returns:
DensityMatrix: the tensor product operator self ⊗ other.
Raises:
QiskitError: if other is not a quantum state.
"""
if not isinstance(other, DensityMatrix):
other = DensityMatrix(other)
dims = other.dims() + self.dims()
data = np.kron(self._data, other._data)
return DensityMatrix(data, dims)
def expand(self, other):
"""Return the tensor product state other ⊗ self.
Args:
other (DensityMatrix): a quantum state object.
Returns:
DensityMatrix: the tensor product state other ⊗ self.
Raises:
QiskitError: if other is not a quantum state.
"""
if not isinstance(other, DensityMatrix):
other = DensityMatrix(other)
dims = self.dims() + other.dims()
data = np.kron(other._data, self._data)
return DensityMatrix(data, dims)
def add(self, other):
"""Return the linear combination self + other.
Args:
other (DensityMatrix): a quantum state object.
Returns:
DensityMatrix: the linear combination self + other.
Raises:
QiskitError: if other is not a quantum state, or has
incompatible dimensions.
"""
if not isinstance(other, DensityMatrix):
other = DensityMatrix(other)
if self.dim != other.dim:
raise QiskitError("other DensityMatrix has different dimensions.")
return DensityMatrix(self.data + other.data, self.dims())
def subtract(self, other):
"""Return the linear operator self - other.
Args:
other (DensityMatrix): a quantum state object.
Returns:
DensityMatrix: the linear combination self - other.
Raises:
QiskitError: if other is not a quantum state, or has
incompatible dimensions.
"""
if not isinstance(other, DensityMatrix):
other = DensityMatrix(other)
if self.dim != other.dim:
raise QiskitError("other DensityMatrix has different dimensions.")
return DensityMatrix(self.data - other.data, self.dims())
def multiply(self, other):
"""Return the linear operator self * other.
Args:
other (complex): a complex number.
Returns:
DensityMatrix: the linear combination other * self.
Raises:
QiskitError: if other is not a valid complex number.
"""
if not isinstance(other, Number):
raise QiskitError("other is not a number")
return DensityMatrix(other * self.data, self.dims())
def evolve(self, other, qargs=None):
"""Evolve a quantum state by an operator.
Args:
other (Operator or QuantumChannel
or Instruction or Circuit): The operator to evolve by.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
"""
# Evolution by a circuit or instruction
if isinstance(other, (QuantumCircuit, Instruction)):
return self._evolve_instruction(other, qargs=qargs)
# Evolution by a QuantumChannel
if hasattr(other, 'to_quantumchannel'):
other = other.to_quantumchannel()
if isinstance(other, QuantumChannel):
return other._evolve(self, qargs=qargs)
# Unitary evolution by an Operator
return self._evolve_operator(other, qargs=qargs)
@classmethod
def from_label(cls, label):
"""Return a tensor product of Pauli X,Y,Z eigenstates.
Args:
            label (string): an eigenstate string ket label 0,1,+,-,r,l.
        Returns:
            DensityMatrix: The N-qubit basis state density matrix.
Raises:
QiskitError: if the label contains invalid characters, or the length
of the label is larger than an explicitly specified num_qubits.
Additional Information:
The labels correspond to the single-qubit states:
'0': [[1, 0], [0, 0]]
'1': [[0, 0], [0, 1]]
'+': [[0.5, 0.5], [0.5 , 0.5]]
'-': [[0.5, -0.5], [-0.5 , 0.5]]
'r': [[0.5, -0.5j], [0.5j , 0.5]]
'l': [[0.5, 0.5j], [-0.5j , 0.5]]
"""
return DensityMatrix(Statevector.from_label(label))
@classmethod
def from_instruction(cls, instruction):
"""Return the output density matrix of an instruction.
        The density matrix is initialized in the state :math:`|{0,\\ldots,0}\\rangle` of
        the same number of qubits as the input instruction or circuit, evolved
        by the input instruction, and the output density matrix returned.
Args:
instruction (qiskit.circuit.Instruction or QuantumCircuit): instruction or circuit
Returns:
DensityMatrix: the final density matrix.
Raises:
QiskitError: if the instruction contains invalid instructions for
density matrix simulation.
"""
# Convert circuit to an instruction
if isinstance(instruction, QuantumCircuit):
instruction = instruction.to_instruction()
        # Initialize the density matrix in the all |0> state
n_qubits = instruction.num_qubits
init = np.zeros((2**n_qubits, 2**n_qubits), dtype=complex)
init[0, 0] = 1
vec = DensityMatrix(init, dims=n_qubits * [2])
vec._append_instruction(instruction)
return vec
@property
def _shape(self):
"""Return the tensor shape of the matrix operator"""
return 2 * tuple(reversed(self.dims()))
def _evolve_operator(self, other, qargs=None):
"""Evolve density matrix by an operator"""
if not isinstance(other, Operator):
other = Operator(other)
if qargs is None:
# Evolution on full matrix
if self._dim != other._input_dim:
raise QiskitError(
"Operator input dimension is not equal to density matrix dimension."
)
mat = np.dot(other.data, self.data).dot(other.adjoint().data)
return DensityMatrix(mat, dims=other.output_dims())
# Otherwise we are applying an operator only to subsystems
# Check dimensions of subsystems match the operator
if self.dims(qargs) != other.input_dims():
raise QiskitError(
"Operator input dimensions are not equal to statevector subsystem dimensions."
)
# Reshape statevector and operator
tensor = np.reshape(self.data, self._shape)
# Construct list of tensor indices of statevector to be contracted
num_indices = len(self.dims())
indices = [num_indices - 1 - qubit for qubit in qargs]
        # Left multiply by mat
mat = np.reshape(other.data, other._shape)
tensor = Operator._einsum_matmul(tensor, mat, indices)
# Right multiply by mat ** dagger
adj = other.adjoint()
mat_adj = np.reshape(adj.data, adj._shape)
tensor = Operator._einsum_matmul(tensor, mat_adj, indices, num_indices,
True)
# Replace evolved dimensions
new_dims = list(self.dims())
for i, qubit in enumerate(qargs):
new_dims[qubit] = other._output_dims[i]
new_dim = np.product(new_dims)
return DensityMatrix(np.reshape(tensor, (new_dim, new_dim)),
dims=new_dims)
def _append_instruction(self, other, qargs=None):
"""Update the current Statevector by applying an instruction."""
# Try evolving by a matrix operator (unitary-like evolution)
mat = Operator._instruction_to_matrix(other)
if mat is not None:
self._data = self._evolve_operator(Operator(mat), qargs=qargs).data
return
# Otherwise try evolving by a Superoperator
chan = SuperOp._instruction_to_superop(other)
if chan is not None:
# Evolve current state by the superoperator
self._data = chan._evolve(self, qargs=qargs).data
return
# If the instruction doesn't have a matrix defined we use its
# circuit decomposition definition if it exists, otherwise we
# cannot compose this gate and raise an error.
if other.definition is None:
raise QiskitError('Cannot apply Instruction: {}'.format(
other.name))
for instr, qregs, cregs in other.definition:
if cregs:
raise QiskitError(
'Cannot apply instruction with classical registers: {}'.
format(instr.name))
# Get the integer position of the flat register
if qargs is None:
new_qargs = [tup.index for tup in qregs]
else:
new_qargs = [qargs[tup.index] for tup in qregs]
self._append_instruction(instr, qargs=new_qargs)
def _evolve_instruction(self, obj, qargs=None):
"""Return a new statevector by applying an instruction."""
if isinstance(obj, QuantumCircuit):
obj = obj.to_instruction()
vec = DensityMatrix(self.data, dims=self.dims())
vec._append_instruction(obj, qargs=qargs)
return vec
def to_counts(self):
"""Returns the density matrix as a counts dict
of probabilities.
Returns:
dict: Counts of probabilities.
"""
return state_to_counts(self.data.diagonal(), self._atol, True)
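# --- Added usage sketch (not part of the original file) ---
# Minimal illustration of the class defined above; in user code DensityMatrix
# would normally be imported instead (e.g. from qiskit.quantum_info, assuming
# this qiskit-terra revision exposes it there).
rho = DensityMatrix.from_label('0+')  # projector onto the two-qubit '0+' product state
assert rho.is_valid()                 # trace 1, Hermitian, positive semidefinite
print(rho.purity())                   # ~1.0 for a pure state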
avg_line_length: 38.85752 | max_line_length: 94 | alphanum_fraction: 0.617505

hexsha: dcfb10e4841bc8327701bae78b353e2342517d59 | size: 358 | ext: py | lang: Python
max_stars:  path: lora_receiver.py | repo: sbcshop1/Lora-HAT-for-Raspberry-Pi | head: 253a02982393d419214916eaadb6eccbe7020d72 | licenses: ["MIT"] | count: 2 | event_min: 2022-01-26T14:03:13.000Z | event_max: 2022-02-23T21:27:35.000Z
max_issues: path: lora_receiver.py | repo: sbcshop1/Lora-HAT-for-Raspberry-Pi | head: 253a02982393d419214916eaadb6eccbe7020d72 | licenses: ["MIT"] | count: 2 | event_min: 2022-02-07T17:43:12.000Z | event_max: 2022-03-29T12:31:59.000Z
max_forks:  path: lora_receiver.py | repo: sbcshop1/Lora-HAT-for-Raspberry-Pi | head: 253a02982393d419214916eaadb6eccbe7020d72 | licenses: ["MIT"] | count: 1 | event_min: 2022-03-29T12:36:03.000Z | event_max: 2022-03-29T12:36:03.000Z
content:
#receiver
import serial
import time
lora = serial.Serial(port = '/dev/ttyS0', baudrate = 9600, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE, bytesize = serial.EIGHTBITS, timeout = 1)
while True:
data_read = lora.readline()#read data from other lora
data = data_read.decode("utf-8")#convert byte into string
print(data)
avg_line_length: 27.538462 | max_line_length: 161 | alphanum_fraction: 0.709497

hexsha: f47d7c77c1a6a81f9f7aa9977ad9b6db65db3bbe | size: 4,222 | ext: py | lang: Python
max_stars:  path: setup.py | repo: Mukesh23singh/aws-service-catalog-puppet | head: 7d90f1533868e3ba14e3243a3d5d90ec9bca14e1 | licenses: ["Apache-2.0"] | count: 1 | event_min: 2020-05-23T06:32:38.000Z | event_max: 2020-05-23T06:32:38.000Z
max_issues: path: setup.py | repo: Mukesh23singh/aws-service-catalog-puppet | head: 7d90f1533868e3ba14e3243a3d5d90ec9bca14e1 | licenses: ["Apache-2.0"] | count: 8 | event_min: 2021-06-14T23:25:24.000Z | event_max: 2021-09-09T15:02:55.000Z
max_forks:  path: setup.py | repo: Mukesh23singh/aws-service-catalog-puppet | head: 7d90f1533868e3ba14e3243a3d5d90ec9bca14e1 | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
content:
# -*- coding: utf-8 -*-
from setuptools import setup
package_dir = \
{'': '.'}
packages = \
['servicecatalog_puppet',
'servicecatalog_puppet.commands',
'servicecatalog_puppet.template_builder',
'servicecatalog_puppet.template_builder.hub',
'servicecatalog_puppet.workflow',
'servicecatalog_puppet.workflow.apps',
'servicecatalog_puppet.workflow.assertions',
'servicecatalog_puppet.workflow.codebuild_runs',
'servicecatalog_puppet.workflow.general',
'servicecatalog_puppet.workflow.generate',
'servicecatalog_puppet.workflow.generic',
'servicecatalog_puppet.workflow.lambda_invocations',
'servicecatalog_puppet.workflow.launch',
'servicecatalog_puppet.workflow.manifest',
'servicecatalog_puppet.workflow.portfolio',
'servicecatalog_puppet.workflow.portfolio.accessors',
'servicecatalog_puppet.workflow.portfolio.associations',
'servicecatalog_puppet.workflow.portfolio.constraints_management',
'servicecatalog_puppet.workflow.portfolio.portfolio_management',
'servicecatalog_puppet.workflow.portfolio.sharing_management',
'servicecatalog_puppet.workflow.spoke_local_portfolios',
'servicecatalog_puppet.workflow.stack',
'servicecatalog_puppet.workflow.workspaces']
package_data = \
{'': ['*'], 'servicecatalog_puppet': ['manifests/*', 'templates/*']}
install_requires = \
['awacs>=1.0.2,<2.0.0',
'better-boto==0.42.0',
'boto3==1.16.56',
'cfn-flip==1.2.3',
'click==7.0',
'colorclass==2.2.0',
'deepdiff>=5.3.0,<6.0.0',
'deepmerge>=0.2.1,<0.3.0',
'jinja2==2.11.3',
'jmespath>=0.10.0,<0.11.0',
'luigi==3.0.2',
'networkx>=2.5,<3.0',
'psutil==5.7.0',
'pyyaml==5.4',
'requests==2.22.0',
'terminaltables==3.1.0',
'troposphere>=2.6.3,<3.0.0',
'yamale>=3.0.8,<4.0.0']
entry_points = \
{'console_scripts': ['servicecatalog-puppet = servicecatalog_puppet.cli:cli']}
setup_kwargs = {
'name': 'aws-service-catalog-puppet',
'version': '0.121.1',
'description': 'Making it easier to deploy ServiceCatalog products',
'long_description': '# aws-service-catalog-puppet\n\n \n\n## Badges\n\n[](https://codecov.io/gh/awslabs/aws-service-catalog-puppet)\n\n\n## What is it?\nThis is a python3 framework that makes it easier to share multi region AWS Service Catalog portfolios and makes it \npossible to provision products into accounts declaratively using a metadata based rules engine.\n\nWith this framework you define your accounts in a YAML file. You give each account a set of tags, a default region and \na set of enabled regions.\n\nOnce you have done this you can define portfolios should be shared with each set of accounts using the tags and you \ncan specify which regions the shares occur in.\n\nIn addition to this, you can also define products that should be provisioned into accounts using the same tag based \napproach. The framework will assume role into the target account and provision the product on your behalf.\n\n\n## Getting started\n\nYou can read the [installation how to](https://service-catalog-tools-workshop.com/30-how-tos/10-installation/30-service-catalog-puppet.html)\nor you can read through the [every day use](https://service-catalog-tools-workshop.com/30-how-tos/50-every-day-use.html)\nguides.\n\nYou can read the [documentation](https://aws-service-catalog-puppet.readthedocs.io/en/latest/) to understand the inner \nworkings. \n\n\n## Going further\n\nThe framework is one of a pair. The other is [aws-service-catalog-factory](https://github.com/awslabs/aws-service-catalog-factory).\nWith Service Catalog Factory you can create pipelines that deploy multi region portfolios very easily. \n\n## License\n\nThis library is licensed under the Apache 2.0 License. \n \n',
'author': 'Eamonn Faherty',
'author_email': 'aws-service-catalog-tools@amazon.com',
'maintainer': None,
'maintainer_email': None,
'url': 'https://service-catalog-tools-workshop.com/',
'package_dir': package_dir,
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'entry_points': entry_points,
'python_requires': '>=3.7,<4',
}
setup(**setup_kwargs)
avg_line_length: 54.128205 | max_line_length: 1,833 | alphanum_fraction: 0.751303

hexsha: 300fef102127a5d14ec7ced1016af20628f141f5 | size: 15,338 | ext: py | lang: Python
max_stars:  path: venv/lib/python2.7/site-packages/werkzeug/testsuite/urls.py | repo: Christian-Castro/castro_odoo8 | head: 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | licenses: ["Unlicense"] | count: 21,684 | event_min: 2015-01-01T03:42:20.000Z | event_max: 2022-03-30T13:32:44.000Z
max_issues: path: venv/lib/python2.7/site-packages/werkzeug/testsuite/urls.py | repo: Christian-Castro/castro_odoo8 | head: 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | licenses: ["Unlicense"] | count: 4,067 | event_min: 2015-01-01T00:04:51.000Z | event_max: 2022-03-30T13:42:56.000Z
max_forks:  path: venv/lib/python2.7/site-packages/werkzeug/testsuite/urls.py | repo: Christian-Castro/castro_odoo8 | head: 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | licenses: ["Unlicense"] | count: 1,901 | event_min: 2015-01-01T21:05:59.000Z | event_max: 2022-03-21T08:14:25.000Z
content:
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.urls
~~~~~~~~~~~~~~~~~~~~~~~
URL helper tests.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.datastructures import OrderedMultiDict
from werkzeug import urls
from werkzeug._compat import text_type, NativeStringIO, BytesIO
class URLsTestCase(WerkzeugTestCase):
def test_replace(self):
url = urls.url_parse('http://de.wikipedia.org/wiki/Troll')
self.assert_strict_equal(url.replace(query='foo=bar'),
urls.url_parse('http://de.wikipedia.org/wiki/Troll?foo=bar'))
self.assert_strict_equal(url.replace(scheme='https'),
urls.url_parse('https://de.wikipedia.org/wiki/Troll'))
def test_quoting(self):
self.assert_strict_equal(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
self.assert_strict_equal(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
self.assert_strict_equal(urls.url_quote_plus('foo bar'), 'foo+bar')
self.assert_strict_equal(urls.url_unquote_plus('foo+bar'), u'foo bar')
self.assert_strict_equal(urls.url_quote_plus('foo+bar'), 'foo%2Bbar')
self.assert_strict_equal(urls.url_unquote_plus('foo%2Bbar'), u'foo+bar')
self.assert_strict_equal(urls.url_encode({b'a': None, b'b': b'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_encode({u'a': None, u'b': u'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
self.assert_strict_equal(urls.url_quote_plus(42), '42')
self.assert_strict_equal(urls.url_quote(b'\xff'), '%FF')
def test_bytes_unquoting(self):
self.assert_strict_equal(urls.url_unquote(urls.url_quote(
u'#%="\xf6', charset='latin1'), charset=None), b'#%="\xf6')
def test_url_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'foo=42;bar=23;uni=H%C3%A4nsel', separator=b';')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'%C3%9Ch=H%C3%A4nsel', decode_keys=True)
self.assert_strict_equal(x[u'Üh'], u'Hänsel')
def test_url_bytes_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel', charset=None)
self.assert_strict_equal(x[b'foo'], b'42')
self.assert_strict_equal(x[b'bar'], b'23')
self.assert_strict_equal(x[b'uni'], u'Hänsel'.encode('utf-8'))
def test_streamed_url_decoding(self):
item1 = u'a' * 100000
item2 = u'b' * 400
string = ('a=%s&b=%s&c=%s' % (item1, item2, item2)).encode('ascii')
gen = urls.url_decode_stream(BytesIO(string), limit=len(string),
return_iterator=True)
self.assert_strict_equal(next(gen), ('a', item1))
self.assert_strict_equal(next(gen), ('b', item2))
self.assert_strict_equal(next(gen), ('c', item2))
self.assert_raises(StopIteration, lambda: next(gen))
def test_stream_decoding_string_fails(self):
self.assert_raises(TypeError, urls.url_decode_stream, 'testing')
def test_url_encoding(self):
self.assert_strict_equal(urls.url_encode({'foo': 'bar 45'}), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
self.assert_strict_equal(urls.url_encode(d, sort=True), 'bar=23&blah=H%C3%A4nsel&foo=1')
self.assert_strict_equal(urls.url_encode(d, sort=True, separator=u';'), 'bar=23;blah=H%C3%A4nsel;foo=1')
def test_sorted_url_encode(self):
self.assert_strict_equal(urls.url_encode({u"a": 42, u"b": 23, 1: 1, 2: 2},
sort=True, key=lambda i: text_type(i[0])), '1=1&2=2&a=42&b=23')
self.assert_strict_equal(urls.url_encode({u'A': 1, u'a': 2, u'B': 3, 'b': 4}, sort=True,
key=lambda x: x[0].lower() + x[0]), 'A=1&a=2&B=3&b=4')
def test_streamed_url_encoding(self):
out = NativeStringIO()
urls.url_encode_stream({'foo': 'bar 45'}, out)
self.assert_strict_equal(out.getvalue(), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True)
self.assert_strict_equal(out.getvalue(), 'bar=23&blah=H%C3%A4nsel&foo=1')
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True, separator=u';')
self.assert_strict_equal(out.getvalue(), 'bar=23;blah=H%C3%A4nsel;foo=1')
gen = urls.url_encode_stream(d, sort=True)
self.assert_strict_equal(next(gen), 'bar=23')
self.assert_strict_equal(next(gen), 'blah=H%C3%A4nsel')
self.assert_strict_equal(next(gen), 'foo=1')
self.assert_raises(StopIteration, lambda: next(gen))
def test_url_fixing(self):
x = urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
self.assert_line_equal(x, 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
x = urls.url_fix("http://just.a.test/$-_.+!*'(),")
self.assert_equal(x, "http://just.a.test/$-_.+!*'(),")
def test_url_fixing_qs(self):
x = urls.url_fix(b'http://example.com/?foo=%2f%2f')
self.assert_line_equal(x, 'http://example.com/?foo=%2f%2f')
x = urls.url_fix('http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
self.assert_equal(x, 'http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
def test_iri_support(self):
self.assert_strict_equal(urls.uri_to_iri('http://xn--n3h.net/'),
u'http://\u2603.net/')
self.assert_strict_equal(
urls.uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'),
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th')
self.assert_strict_equal(urls.iri_to_uri(u'http://☃.net/'), 'http://xn--n3h.net/')
self.assert_strict_equal(
urls.iri_to_uri(u'http://üser:pässword@☃.net/påth'),
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
self.assert_strict_equal(urls.uri_to_iri('http://test.com/%3Fmeh?foo=%26%2F'),
u'http://test.com/%3Fmeh?foo=%26%2F')
# this should work as well, might break on 2.4 because of a broken
# idna codec
self.assert_strict_equal(urls.uri_to_iri(b'/foo'), u'/foo')
self.assert_strict_equal(urls.iri_to_uri(u'/foo'), '/foo')
self.assert_strict_equal(urls.iri_to_uri(u'http://föö.com:8080/bam/baz'),
'http://xn--f-1gaa.com:8080/bam/baz')
def test_iri_safe_conversion(self):
self.assert_strict_equal(urls.iri_to_uri(u'magnet:?foo=bar'),
'magnet:?foo=bar')
self.assert_strict_equal(urls.iri_to_uri(u'itms-service://?foo=bar'),
'itms-service:?foo=bar')
self.assert_strict_equal(urls.iri_to_uri(u'itms-service://?foo=bar',
safe_conversion=True),
'itms-service://?foo=bar')
def test_iri_safe_quoting(self):
uri = 'http://xn--f-1gaa.com/%2F%25?q=%C3%B6&x=%3D%25#%25'
iri = u'http://föö.com/%2F%25?q=ö&x=%3D%25#%25'
self.assert_strict_equal(urls.uri_to_iri(uri), iri)
self.assert_strict_equal(urls.iri_to_uri(urls.uri_to_iri(uri)), uri)
def test_ordered_multidict_encoding(self):
d = OrderedMultiDict()
d.add('foo', 1)
d.add('foo', 2)
d.add('foo', 3)
d.add('bar', 0)
d.add('foo', 4)
self.assert_equal(urls.url_encode(d), 'foo=1&foo=2&foo=3&bar=0&foo=4')
def test_multidict_encoding(self):
d = OrderedMultiDict()
d.add('2013-10-10T23:26:05.657975+0000', '2013-10-10T23:26:05.657975+0000')
self.assert_equal(urls.url_encode(d), '2013-10-10T23%3A26%3A05.657975%2B0000=2013-10-10T23%3A26%3A05.657975%2B0000')
def test_href(self):
x = urls.Href('http://www.example.com/')
self.assert_strict_equal(x(u'foo'), 'http://www.example.com/foo')
self.assert_strict_equal(x.foo(u'bar'), 'http://www.example.com/foo/bar')
self.assert_strict_equal(x.foo(u'bar', x=42), 'http://www.example.com/foo/bar?x=42')
self.assert_strict_equal(x.foo(u'bar', class_=42), 'http://www.example.com/foo/bar?class=42')
self.assert_strict_equal(x.foo(u'bar', {u'class': 42}), 'http://www.example.com/foo/bar?class=42')
self.assert_raises(AttributeError, lambda: x.__blah__)
x = urls.Href('blah')
self.assert_strict_equal(x.foo(u'bar'), 'blah/foo/bar')
self.assert_raises(TypeError, x.foo, {u"foo": 23}, x=42)
x = urls.Href('')
self.assert_strict_equal(x('foo'), 'foo')
def test_href_url_join(self):
x = urls.Href(u'test')
self.assert_line_equal(x(u'foo:bar'), u'test/foo:bar')
self.assert_line_equal(x(u'http://example.com/'), u'test/http://example.com/')
self.assert_line_equal(x.a(), u'test/a')
def test_href_past_root(self):
base_href = urls.Href('http://www.blagga.com/1/2/3')
self.assert_strict_equal(base_href('../foo'), 'http://www.blagga.com/1/2/foo')
self.assert_strict_equal(base_href('../../foo'), 'http://www.blagga.com/1/foo')
self.assert_strict_equal(base_href('../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../../foo'), 'http://www.blagga.com/foo')
def test_url_unquote_plus_unicode(self):
# was broken in 0.6
self.assert_strict_equal(urls.url_unquote_plus(u'\x6d'), u'\x6d')
self.assert_is(type(urls.url_unquote_plus(u'\x6d')), text_type)
def test_quoting_of_local_urls(self):
rv = urls.iri_to_uri(u'/foo\x8f')
self.assert_strict_equal(rv, '/foo%C2%8F')
self.assert_is(type(rv), str)
def test_url_attributes(self):
rv = urls.url_parse('http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, 'http')
self.assert_strict_equal(rv.auth, 'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, 'foo%3a')
self.assert_strict_equal(rv.raw_password, 'bar%3a')
self.assert_strict_equal(rv.host, '::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, '/123')
self.assert_strict_equal(rv.query, 'x=y')
self.assert_strict_equal(rv.fragment, 'frag')
rv = urls.url_parse(u'http://\N{SNOWMAN}.com/')
self.assert_strict_equal(rv.host, u'\N{SNOWMAN}.com')
self.assert_strict_equal(rv.ascii_host, 'xn--n3h.com')
def test_url_attributes_bytes(self):
rv = urls.url_parse(b'http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, b'http')
self.assert_strict_equal(rv.auth, b'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, b'foo%3a')
self.assert_strict_equal(rv.raw_password, b'bar%3a')
self.assert_strict_equal(rv.host, b'::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, b'/123')
self.assert_strict_equal(rv.query, b'x=y')
self.assert_strict_equal(rv.fragment, b'frag')
def test_url_joining(self):
self.assert_strict_equal(urls.url_join('/foo', '/bar'), '/bar')
self.assert_strict_equal(urls.url_join('http://example.com/foo', '/bar'),
'http://example.com/bar')
self.assert_strict_equal(urls.url_join('file:///tmp/', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', '../../../x.html'),
'file:///x.html')
def test_partial_unencoded_decode(self):
ref = u'foo=정상처리'.encode('euc-kr')
x = urls.url_decode(ref, charset='euc-kr')
self.assert_strict_equal(x['foo'], u'정상처리')
def test_iri_to_uri_idempotence_ascii_only(self):
uri = u'http://www.idempoten.ce'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_iri_to_uri_idempotence_non_ascii(self):
uri = u'http://\N{SNOWMAN}/\N{SNOWMAN}'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_uri_to_iri_idempotence_ascii_only(self):
uri = 'http://www.idempoten.ce'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_uri_to_iri_idempotence_non_ascii(self):
uri = 'http://xn--n3h/%E2%98%83'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_iri_to_uri_to_iri(self):
iri = u'http://föö.com/'
uri = urls.iri_to_uri(iri)
self.assert_equal(urls.uri_to_iri(uri), iri)
def test_uri_to_iri_to_uri(self):
uri = 'http://xn--f-rgao.com/%C3%9E'
iri = urls.uri_to_iri(uri)
self.assert_equal(urls.iri_to_uri(iri), uri)
def test_uri_iri_normalization(self):
uri = 'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93'
iri = u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713'
tests = [
u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713',
u'http://xn--f-rgao.com/\u2610/fred?utf8=\N{CHECK MARK}',
b'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://föñ.com/\u2610/fred?utf8=%E2%9C%93',
b'http://xn--f-rgao.com/\xe2\x98\x90/fred?utf8=\xe2\x9c\x93',
]
for test in tests:
self.assert_equal(urls.uri_to_iri(test), iri)
self.assert_equal(urls.iri_to_uri(test), uri)
self.assert_equal(urls.uri_to_iri(urls.iri_to_uri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.uri_to_iri(test)), uri)
self.assert_equal(urls.uri_to_iri(urls.uri_to_iri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.iri_to_uri(test)), uri)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(URLsTestCase))
return suite
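# --- Added runner sketch (not part of the original file) ---
# The module only builds a TestSuite; this is one way to execute it directly
# with the standard library runner (the werkzeug test harness of that era may
# wrap this differently).
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())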
avg_line_length: 47.486068 | max_line_length: 128 | alphanum_fraction: 0.617877

hexsha: 7e425ed2cc71e7582ea83b9ba604aa13f4cc9f3e | size: 3,111 | ext: py | lang: Python
max_stars:  path: test/test_pipeline/components/feature_preprocessing/test_extra_trees_regression.py | repo: tuggeluk/auto-sklearn | head: 202918e5641701c696b995039d06bfec81973cc6 | licenses: ["BSD-3-Clause"] | count: 1 | event_min: 2017-08-13T13:57:40.000Z | event_max: 2017-08-13T13:57:40.000Z
max_issues: path: test/test_pipeline/components/feature_preprocessing/test_extra_trees_regression.py | repo: chrinide/auto-sklearn | head: 1c6af59ff61f1d0a3b54b16a35ffbc5d2d3828cd | licenses: ["BSD-3-Clause"] | count: null | event_min: null | event_max: null
max_forks:  path: test/test_pipeline/components/feature_preprocessing/test_extra_trees_regression.py | repo: chrinide/auto-sklearn | head: 1c6af59ff61f1d0a3b54b16a35ffbc5d2d3828cd | licenses: ["BSD-3-Clause"] | count: 1 | event_min: 2019-06-18T15:40:37.000Z | event_max: 2019-06-18T15:40:37.000Z
content:
import unittest
from sklearn.ensemble import ExtraTreesRegressor
from autosklearn.pipeline.components.feature_preprocessing.\
extra_trees_preproc_for_regression import \
ExtraTreesPreprocessorRegression
from autosklearn.pipeline.util import _test_preprocessing, \
PreprocessingTestCase, get_dataset
import sklearn.metrics
class ExtreTreesRegressionComponentTest(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(
ExtraTreesPreprocessorRegression)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_regression(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston',
make_sparse=False)
configuration_space = ExtraTreesPreprocessorRegression.\
get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = ExtraTreesPreprocessorRegression(
random_state=1,
**{hp_name: default[hp_name] for hp_name in default})
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a regressor on top
regressor = ExtraTreesRegressor(random_state=1)
predictor = regressor.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.mean_squared_error(predictions, Y_test)
self.assertAlmostEqual(accuracy, 20.193400000000004, places=2)
def test_default_configuration_classify_sparse(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston',
make_sparse=True)
configuration_space = ExtraTreesPreprocessorRegression.\
get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = ExtraTreesPreprocessorRegression(
random_state=1,
**{hp_name: default[hp_name] for hp_name in default})
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a regressor on top
regressor = ExtraTreesRegressor(random_state=1)
predictor = regressor.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.mean_squared_error(predictions, Y_test)
self.assertAlmostEqual(accuracy, 62.485374939528718, places=2)
def test_preprocessing_dtype(self):
super(ExtreTreesRegressionComponentTest, self).\
_test_preprocessing_dtype(ExtraTreesPreprocessorRegression)
avg_line_length: 49.380952 | max_line_length: 78 | alphanum_fraction: 0.671488

hexsha: 977c2deb3e5aa3ed6f17d14e8a7f08031641ed4d | size: 15,048 | ext: py | lang: Python
max_stars:  path: main_test2.py | repo: jaeseoko/CMU-Robot-Desin | head: 0bbbdc8dbfb8c7758ec8aaae3fc9c1bd82721b19 | licenses: ["MIT"] | count: 1 | event_min: 2021-04-11T18:24:03.000Z | event_max: 2021-04-11T18:24:03.000Z
max_issues: path: main_test2.py | repo: jaeseoko/CMU-Robot-Desin | head: 0bbbdc8dbfb8c7758ec8aaae3fc9c1bd82721b19 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_forks:  path: main_test2.py | repo: jaeseoko/CMU-Robot-Desin | head: 0bbbdc8dbfb8c7758ec8aaae3fc9c1bd82721b19 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
content:
import RPi.GPIO as GPIO
from time import sleep
import numpy as np
import Encoder
import threading
import signal
import sys
import pybullet as p
import argparse
import time
# import keyboard
# For pybullet loading urdf to calculate inverse dynamics / Motor Params
def saveTorque(myArray):
# x = [['ABC', 123.45], ['DEF', 678.90]]
np.savetxt('torqueLog.txt', myArray, fmt='%s')
# default dtype for np.loadtxt is also floating point, change it, to be able to load mixed data.
# y = np.loadtxt('text.txt', dtype=np.object)
def SetUp():
global ee_mass, bodyId
client = p.connect(p.DIRECT)
p.setGravity(0, 0, -9.81, physicsClientId=client)
flags = p.URDF_USE_SELF_COLLISION
bodyId = p.loadURDF("./data/Arm_Final_Planet_hook/urdf/Arm_Final_Planet_hook.urdf",
basePosition=[0,0,0],useFixedBase=True,flags=flags)
maxForce = 0
p.setJointMotorControl2(bodyId, 0,controlMode=p.VELOCITY_CONTROL, force=maxForce)
p.setJointMotorControl2(bodyId, 1,controlMode=p.VELOCITY_CONTROL, force=maxForce)
p.setJointMotorControl2(bodyId, 2,controlMode=p.VELOCITY_CONTROL, force=maxForce)
# end-effector mass in bullet.
ee_mass = p.getDynamicsInfo(bodyId,3)[0]
parser = argparse.ArgumentParser()
parser.add_argument('a0',
type=float,
help='target end effector joint angle 0')
parser.add_argument('a1',
type=float,
help='target end effector joint angle 1')
parser.add_argument('a2',
type=float,
help='target end effector joint angle 2')
parser.add_argument('--load',
type=float,
help='weight to lift')
parser.add_argument('--worm',
type=int,
help='set if worm gear used or not,0: planetary 1: worm gear')
args = parser.parse_args()
targetORN = [args.a0*np.pi/180,args.a1*np.pi/180,args.a2*np.pi/180]
destORN = [args.a0*np.pi/180 + np.pi/2,args.a1*np.pi/180,args.a2*np.pi/180]
prev_pos = [0,-(85)*np.pi/180,0]
off = [0,-(85)*np.pi/180,0]
prev_error = [0,0,0]
cum_e = [0,0,0]
load = args.load
logTorque = []
if args.worm==0:
worm = False
else:
worm = True
picked, placed = False, False
offset = False
# import time
start_time = time.time()
# end = time.time()
return targetORN,destORN,prev_pos,prev_error,cum_e,load,picked,placed,offset,worm, off, logTorque, start_time
def checkPoint(error,vel,status):
tol = 0.1
if( status == False and np.linalg.norm(np.asarray(error),axis=0) < tol and
np.linalg.norm(np.asarray(vel),axis=0) < tol):
status = True
return status
def GetVoltage(torque,vel):
Ts = 23.5/1000 # Nm (stall torque)
Is = 1.8 # A (stall current)
R = 8.4 # Ohm
V = 12 # Voltage [V]
noLoadCurr = 70/1000 # A
noLoadSpeed = 7000*2*np.pi/60 # rad / s
N = 270
Kt = Ts/Is
Ke = (V - R*noLoadCurr)/noLoadSpeed
V0 = R/Kt*torque[0]/N +Ke*vel[0]*N
V1 = R/Kt*torque[1]/N +Ke*vel[1]*N
V2 = R/Kt*torque[2]/N +Ke*vel[2]*N
return [V0,V1,V2]
def GetVoltageWorm(torque,vel):
# PLANETARY for first and second
Ts = 23.5/1000 # Nm (stall torque)
Is = 1.8 # A (stall current)
R = 8.4 # Ohm
V = 12 # Voltage [V]
noLoadCurr = 70/1000 # A
noLoadSpeed = 7000*2*np.pi/60 # rad / s
N = 270
Kt = Ts/Is
Ke = (V - R*noLoadCurr)/noLoadSpeed
# WORM for third
Ts2 = 70/1000 # Nm (stall torque)
Is2 = 5.2 # A (stall current)
R2 = 8.4 # Ohm
V2 = 24 # Voltage [V]
noLoadCurr2 = 0.25 # A
noLoadSpeed2 = 16*2*np.pi/60 # rad / s
N2 = 5002
Kt2 = Ts2/Is2
Ke2 = (V2 - R2*noLoadCurr2)/noLoadSpeed2
V0 = R/Kt*torque[0]/N +Ke*vel[0]*N
V1 = R/Kt*torque[1]/N +Ke*vel[1]*N
V2 = R2/Kt2*torque[2]/N2 +Ke2*vel[2]*N2
return [V0,V1,V2]
def PID_torque(e,de,cum_e,load):
# kp0,ki0,kd0 = 2e-2, 1e-8 , 2e-2
kp0,ki0,kd0 = 9e-2, 1e-8 , 9e-2
# kp1,ki1,kd1 = 3e-2, 1e-7 , 4e-2
kp1,ki1,kd1 = 5.7, 1e-3 , 5.9
# kp2,ki2,kd2 = 2e-2, 1e-4 , 2e-2
kp2,ki2,kd2 = 9e-1, 1e-3 , 9e-1
if(load!=0):
kp0*=(1+ 10.5*load)
ki0*=(1+ 5**5*load)
kd0*=(1+ 15*load)
kp1*=(1+ 1000.6*load)
ki1*=(1+ 5**6*load)
kd1*=(1+ 805*load)
kp2*=(1+ 7.025*load)
ki2*=(1+ 7.5*load)
kd2*=(1+ 7.025*load)
T0 = kp0*(e[0]) + kd0*(de[0]) + ki0*cum_e[0]
T1 = kp1*(e[1]) + kd1*(de[1]) + ki1*cum_e[1]
T2 = kp2*(e[2]) + kd2*(de[2]) + ki2*cum_e[2]
return [T0,T1,T2]
# For GPIO clean exit
def signal_handler(sig, frame):
print('Cleaning GPIO and Exiting the program...')
exitRoutine()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Motor--------------------------------------
pwm_frequency = 1000
encoder_count_per_rotation = 810
V = 12
# GPIOs--------------------------------------
# First Motor related
motor_driver_1_reverse_enable_pin = 6 # GPIO 4
motor_driver_1_forward_enable_pin = 13 # GPIO 17
motor_driver_1_reverse_pwm_pin = 19 # GPIO 27
motor_driver_1_forward_pwm_pin = 26 # GPIO 22
motor_1_Encoder_A_pin = 12 # GPIO 18
motor_1_Encoder_B_pin = 16 # GPIO 23
# Second Motor related
motor_driver_2_reverse_enable_pin = 10 # GPIO 10
motor_driver_2_forward_enable_pin = 9 # GPIO 9
motor_driver_2_reverse_pwm_pin = 11 # GPIO 11
motor_driver_2_forward_pwm_pin = 5 # GPIO 5
motor_2_Encoder_A_pin = 24 # GPIO 24
motor_2_Encoder_B_pin = 25 # GPIO 25
# Third Motor related
motor_driver_3_reverse_enable_pin = 4 # GPIO 6
motor_driver_3_forward_enable_pin = 17 # GPIO 13
motor_driver_3_reverse_pwm_pin = 27 # GPIO 19
motor_driver_3_forward_pwm_pin = 22 # GPIO 26
motor_3_Encoder_A_pin = 18 # GPIO 12
motor_3_Encoder_B_pin = 23 # GPIO 16
# GPIO initialization--------------------------------------
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# First Motor related
GPIO.setup(motor_driver_1_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_1_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_1_Encoder_B_pin, GPIO.IN)
motor_1_encoder = Encoder.Encoder(motor_1_Encoder_A_pin, motor_1_Encoder_B_pin)
motor_driver_1_reverse_pwm = GPIO.PWM(motor_driver_1_reverse_pwm_pin, pwm_frequency)
motor_driver_1_forward_pwm = GPIO.PWM(motor_driver_1_forward_pwm_pin, pwm_frequency)
# Second Motor related
GPIO.setup(motor_driver_2_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_2_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_2_Encoder_B_pin, GPIO.IN)
motor_2_encoder = Encoder.Encoder(motor_2_Encoder_A_pin, motor_2_Encoder_B_pin)
motor_driver_2_reverse_pwm = GPIO.PWM(motor_driver_2_reverse_pwm_pin, pwm_frequency)
motor_driver_2_forward_pwm = GPIO.PWM(motor_driver_2_forward_pwm_pin, pwm_frequency)
# Third Motor related
GPIO.setup(motor_driver_3_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_3_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_3_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_3_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_3_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_3_Encoder_B_pin, GPIO.IN)
motor_3_encoder = Encoder.Encoder(motor_3_Encoder_A_pin, motor_3_Encoder_B_pin)
motor_driver_3_reverse_pwm = GPIO.PWM(motor_driver_3_reverse_pwm_pin, pwm_frequency)
motor_driver_3_forward_pwm = GPIO.PWM(motor_driver_3_forward_pwm_pin, pwm_frequency)
# End of initialization--------------------------------------
def rotateCCW(motor, voltage):
global motor_driver_1_forward_pwm
global motor_driver_2_forward_pwm
global motor_driver_3_forward_pwm
global V
pwm_percent = 0
if(voltage > 12):
pwm_percent = 100
else:
pwm_percent = voltage / V * 100
if(motor == 0):
motor_driver_1_forward_pwm.ChangeDutyCycle(pwm_percent)
elif (motor == 1):
motor_driver_2_forward_pwm.ChangeDutyCycle(pwm_percent)
elif (motor == 2):
motor_driver_3_forward_pwm.ChangeDutyCycle(pwm_percent)
def rotateCW(motor, voltage):
global motor_driver_1_reverse_pwm
global motor_driver_2_reverse_pwm
global motor_driver_3_reverse_pwm
global V
pwm_percent = 0
if(voltage > 12):
pwm_percent = 100
else:
pwm_percent = voltage / V * 100
if(motor == 0):
motor_driver_1_reverse_pwm.ChangeDutyCycle(pwm_percent)
elif (motor == 1):
motor_driver_2_reverse_pwm.ChangeDutyCycle(pwm_percent)
elif (motor == 2):
motor_driver_3_reverse_pwm.ChangeDutyCycle(pwm_percent)
def stopRotate(motor):
rotateCW(motor, 0)
rotateCCW(motor, 0)
def getEncoderPosition(encoder):
global motor_1_encoder
global motor_2_encoder
global motor_3_encoder
global encoder_count_per_rotation
if(encoder == 0):
return 2* np.pi * (motor_1_encoder.read() / 10) / (encoder_count_per_rotation) # rad
elif (encoder == 1):
return 2* np.pi * (motor_2_encoder.read() / 10) / (encoder_count_per_rotation) # rad
elif (encoder == 2):
return 2* np.pi * (motor_3_encoder.read() / 10) / (encoder_count_per_rotation) # rad
def getEncoderVelocity(encoder_position, prev_pos, dt):
return (encoder_position - prev_pos) / (dt) # rad/s
def exitRoutine():
GPIO.cleanup()
dt = 0.05 #50ms
prev_pos = 0
GPIO.output(motor_driver_1_reverse_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_1_forward_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_2_reverse_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_2_forward_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_3_reverse_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_3_forward_enable_pin, GPIO.HIGH)
motor_driver_1_forward_pwm.start(0)
motor_driver_1_reverse_pwm.start(0)
motor_driver_2_forward_pwm.start(0)
motor_driver_2_reverse_pwm.start(0)
motor_driver_3_forward_pwm.start(0)
motor_driver_3_reverse_pwm.start(0)
# rotateCW(0, 12)
# pause = 0
targetORN, destORN, prev_pos, prev_error, cum_e, load, picked, placed, offset, worm, off , logTorque, start_time= SetUp()
def main():
global targetORN, destORN, prev_pos, prev_error, cum_e, load, picked, placed, offset, worm, off
pos = [getEncoderPosition(0) +off[0],-getEncoderPosition(1) +off[1],getEncoderPosition(2)+off[2]]
print("checking getEncoderPosition fun: ",getEncoderPosition(0),
getEncoderPosition(1),
getEncoderPosition(2))
print("--------------------------------------------")
vel = [getEncoderVelocity(pos[0], prev_pos[0], dt),
getEncoderVelocity(pos[1], prev_pos[1], dt),
getEncoderVelocity(pos[2], prev_pos[2], dt)]
vel[1]*=-1
# print("--------------------------------------------")
# if offset ==False:
# targetORN[2]-=10*np.pi/180
# offset = True
# error = [targetORN[0]-pos[0],targetORN[1]-pos[1],targetORN[2]-pos[2] ]
error = [targetORN[0]-pos[0],-targetORN[1]+pos[1],targetORN[2]-pos[2] ]
print("errors: ",error[0]*180/np.pi,error[1]*180/np.pi,error[2]*180/np.pi)
de = [error[0] - prev_error[0],error[1] - prev_error[1],error[2] - prev_error[2] ]
cum_e+=error
cum_e = [cum_e[0]+error[0],cum_e[1]+error[1],cum_e[2]+error[2]]
if picked == False:
pidTorques = PID_torque(error, de, cum_e, 0)
picked = checkPoint(error, vel, picked)
if True==checkPoint(error, vel, picked):
picked = True
targetORN = destORN
if picked == True:
pidTorques = PID_torque(error, de, cum_e, load)
# placed = checkPoint(error, vel, placed)
if True==checkPoint(error, vel, placed):
placed = True
print("Reached goal destination.")
tau0,tau1,tau2 = p.calculateInverseDynamics(bodyId,
[pos[0],pos[1],pos[2]],
[vel[0],vel[1],vel[2]],
[0,0,0])
print("--------------------------------------------")
print("from bullet, torq 1: ",tau1)
print("--------------------------------------------")
tau1=tau1 + 0.2*tau1
torque = [pidTorques[0]+tau0,pidTorques[1]+tau1,pidTorques[2]+tau2]
print("torques = ", torque)
print("--------------------------------------------")
if worm==True:
volt = GetVoltageWorm(torque, vel)
# Turning off
if ( abs(vel[1])+abs(vel[2]) < 0.02 ) and \
( (abs(error[1]+abs(error[2])) ) < 0.5 ) :
stopRotate(2)
GPIO.output(motor_driver_3_reverse_enable_pin, GPIO.LOW)
GPIO.output(motor_driver_3_forward_enable_pin, GPIO.LOW)
else:
volt = GetVoltage(torque,vel)
# volt = [0,0,0]
print("volt = ", volt)
print("--------------------------------------------")
# if(volt[0]>0): rotateCW(0, volt[0])
# else: rotateCCW(0, abs(volt[0]))
# if(volt[1]<0): rotateCW(1, abs(volt[1]))
# else: rotateCCW(1, volt[1])
# if picked==True and worm == True:
# stopRotate(2)
# elif(volt[2]<0):
# rotateCW(2, abs(volt[2]))
# else:
# rotateCCW(2, volt[2])
if(volt[0]>0): rotateCW(0, abs(volt[0]))
else: rotateCCW(0, abs(volt[0]))
if(volt[1]>0): rotateCW(1, abs(volt[1]))
else: rotateCCW(1, abs(volt[1]))
# if picked==True and worm == True:
# stopRotate(2)
if(volt[2]>0):
rotateCW(2, abs(volt[2]))
else:
rotateCCW(2, abs(volt[2]))
print("position 0: " + str(pos[0]*180/np.pi) + ". velocity 0: " + str(vel[0]) + ".")
print("position 1: " + str(pos[1]*180/np.pi) + ". velocity 1: " + str(vel[1]) + ".")
print("position 2: " + str(pos[2]*180/np.pi) + ". velocity 2: " + str(vel[2]) + ".")
print("-----------------------------------------------------------------")
prev_pos = pos
prev_error = error
now = time.time()
torque.append(now-start_time)
logTorque.append(torque)
saveTorque(np.asarray(logTorque))
threading.Timer(dt, main).start()
main()
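# --- Added worked example (sketch, not part of the original file) ---
# GetVoltage() above applies a brushed-DC motor model through a gearbox of
# ratio N: motor torque is tau/N and motor speed is N*omega, so the terminal
# voltage is V = (R/Kt)*(tau/N) + Ke*(N*omega). With the planetary-gear
# constants used above (Ts = 23.5 mNm, Is = 1.8 A, R = 8.4 Ohm, 7000 rpm
# no-load speed, N = 270), a joint torque of 1 Nm at 0.1 rad/s gives roughly
#   Kt = 0.0235/1.8          ~= 0.0131 Nm/A
#   Ke = (12 - 8.4*0.07)/733 ~= 0.0156 V*s/rad
#   V  = (8.4/0.0131)*(1/270) + 0.0156*0.1*270 ~= 2.4 + 0.4 ~= 2.8 V
print(GetVoltage([1.0, 0.0, 0.0], [0.1, 0.0, 0.0])[0])  # ~= 2.8 V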
avg_line_length: 33.44 | max_line_length: 121 | alphanum_fraction: 0.600545

hexsha: 7f14a4bfb12853a145bca214769886e13c9c902a | size: 289 | ext: py | lang: Python
max_stars:  path: molecule/default/tests/test_default.py | repo: slated/ansible-python-roles | head: 83b14ddbc4348dd8d55958b47c520fb46e97b641 | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
max_issues: path: molecule/default/tests/test_default.py | repo: slated/ansible-python-roles | head: 83b14ddbc4348dd8d55958b47c520fb46e97b641 | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
max_forks:  path: molecule/default/tests/test_default.py | repo: slated/ansible-python-roles | head: 83b14ddbc4348dd8d55958b47c520fb46e97b641 | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
content:
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_python_3_is_installed(host):
python_3 = host.package('python3.6')
assert python_3.is_installed
avg_line_length: 22.230769 | max_line_length: 63 | alphanum_fraction: 0.788927

hexsha: 1e13be2b603ac6483cfd751b7bff0c3d6980d540 | size: 93 | ext: py | lang: Python
max_stars:  path: StreamApp/apps.py | repo: felixfaisal/StreamApp | head: 9ba93f7af389eef3d4334f2a04dca5ab84aa59e8 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_issues: path: StreamApp/apps.py | repo: felixfaisal/StreamApp | head: 9ba93f7af389eef3d4334f2a04dca5ab84aa59e8 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_forks:  path: StreamApp/apps.py | repo: felixfaisal/StreamApp | head: 9ba93f7af389eef3d4334f2a04dca5ab84aa59e8 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
content:
from django.apps import AppConfig
class StreamappConfig(AppConfig):
name = 'StreamApp'
avg_line_length: 15.5 | max_line_length: 33 | alphanum_fraction: 0.763441

hexsha: f3ccda420d7efceeb8aeaebd223b01396b24e1c2 | size: 1,714 | ext: py | lang: Python
max_stars:  path: src/settings.py | repo: jjmartinez-taiger/libreCatastro | head: b40bfa375095411814b99ac839959418a83c3071 | licenses: ["MIT"] | count: 9 | event_min: 2019-11-30T11:14:30.000Z | event_max: 2022-03-14T09:41:57.000Z
max_issues: path: src/settings.py | repo: jjmartinez-taiger/libreCatastro | head: b40bfa375095411814b99ac839959418a83c3071 | licenses: ["MIT"] | count: 1 | event_min: 2021-04-11T17:15:08.000Z | event_max: 2021-04-11T17:15:08.000Z
max_forks:  path: src/settings.py | repo: jjmartinez-taiger/libreCatastro | head: b40bfa375095411814b99ac839959418a83c3071 | licenses: ["MIT"] | count: 1 | event_min: 2021-08-07T12:15:43.000Z | event_max: 2021-08-07T12:15:43.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from os import environ
""" Dict settings file with config parameters"""
root_path = os.path.dirname(os.path.abspath(__file__))
config = {
"separator": "####",
"elasticsearch-index": "cadaster",
"elasticsearch-doc": "cadaster_doc",
"error_log_file": os.path.join(root_path, 'logs', 'log'),
"tracking_log_file": os.path.join(root_path, 'logs', 'track'),
"scale": 10000,
"coordinates_path": os.path.join(root_path, 'coordinates'),
"not_available_via_XML": "(Not available via XML)",
"sleep_time": 5,
"sleep_dos_time": 300,
"width_px": 120,
"height_px": 120,
"elasticsearch-host": environ.get('ES_HOST') if environ.get('ES_HOST') is not None else "localhost",
"elasticsearch-port": environ.get('ES_PORT') if environ.get('ES_PORT') is not None else "9200",
"servers_down_message_001": "Error 001: Cadastro server to get provinces and cities is down.\n"
"Consequence: Search by provinces will fail.\n"
"Maintenance is usually carried out durign the night or the weekends. Please, retry later.\n"
"As an alternative, your IP address may have been banned. Try to change your public IP",
"servers_down_message_002": "Error 002: Cadastro server to query by cadaster number is off.\n"
"Search by Coordinates will fail.\n"
"Maintenance is usually carried out durign the night or the weekends. Please, retry later.\n"
"As an alternative, your IP address may have been banned. Try to change your public IP\n"
}
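# --- Added usage sketch (not part of the original file) ---
# Downstream modules read connection details straight from this dict; the
# ES_HOST / ES_PORT environment variables (read above at import time) override
# the localhost:9200 defaults. For example, a hypothetical client setup:
es_url = "http://{}:{}".format(config["elasticsearch-host"], config["elasticsearch-port"])
print(es_url)  # e.g. http://localhost:9200 when no environment overrides are set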
avg_line_length: 47.611111 | max_line_length: 125 | alphanum_fraction: 0.628355

hexsha: 6c1680d95dc512c40da6b5334ec0defc84460dd8 | size: 78 | ext: py | lang: Python
max_stars:  path: version.py | repo: shinji-s/scrapy-cluster | head: 0153bf61f46068e61f76798147a850769a190b95 | licenses: ["MIT"] | count: 1,108 | event_min: 2015-04-15T16:02:26.000Z | event_max: 2022-03-31T11:46:29.000Z
max_issues: path: version.py | repo: shinji-s/scrapy-cluster | head: 0153bf61f46068e61f76798147a850769a190b95 | licenses: ["MIT"] | count: 246 | event_min: 2015-07-08T18:37:12.000Z | event_max: 2021-06-28T14:33:51.000Z
max_forks:  path: version.py | repo: shinji-s/scrapy-cluster | head: 0153bf61f46068e61f76798147a850769a190b95 | licenses: ["MIT"] | count: 382 | event_min: 2015-04-20T07:16:05.000Z | event_max: 2022-03-21T11:34:59.000Z
content:
__version__ = '1.3.0'
VERSION = tuple(int(x) for x in __version__.split('.'))
avg_line_length: 26 | max_line_length: 55 | alphanum_fraction: 0.666667

hexsha: d393c1b4e86c38ea4f527caaf28ca2a007899318 | size: 4,797 | ext: py | lang: Python
max_stars:  path: Projects/PlantMaker/archive/20100520/src/order.py | repo: fredmorcos/attic | head: 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | licenses: ["Unlicense"] | count: 2 | event_min: 2021-01-24T09:00:51.000Z | event_max: 2022-01-23T20:52:17.000Z
max_issues: path: Projects/PlantMaker/archive/20100520/src/order.py | repo: fredmorcos/attic | head: 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | licenses: ["Unlicense"] | count: 6 | event_min: 2020-02-29T01:59:03.000Z | event_max: 2022-02-15T10:25:40.000Z
max_forks:  path: Projects/PlantMaker/archive/20100520/src/order.py | repo: fredmorcos/attic | head: 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | licenses: ["Unlicense"] | count: 1 | event_min: 2019-03-22T14:41:21.000Z | event_max: 2019-03-22T14:41:21.000Z
content:
"""
This module provides classes to build an OrderList for input to a plant,
including the Order instances and their Recipe instances.
"""
from xml.dom import minidom
class Recipe(object):
"""
This class provides a Recipe for an Order. It is a list (or dictionary) of
tuples (str machineName, int timeAtMachine).
"""
def __init__(self):
"""
recipe is a list (or dictionary) that contains the tuples of time
information for the Recipe.
"""
self.recipe = []
def indexOfMachine(self, machineName):
"""
Returns the index of the Machine with machineName in the recipe list.
"""
for i, r in enumerate(self.recipe):
if r[0] == machineName:
return i
return -1
def calcMinProcTime(self, plant, machineName = None):
"""
This method calculates the minimum processing time of the Recipe
starting from Machine with machineName (Considers the constant plant
delays for the crane movement time between machines).
"""
if machineName == None or machineName == "":
index = 0
else:
index = self.indexOfMachine(machineName)
res = (len(self.recipe) - 1 - index) * plant.craneMoveTime
while index < len(self.recipe):
res += self.recipe[index][1]
if self.recipe[index][1] == 0:
res -= plant.craneMoveTime
index += 1
return res
def __getitem__(self, key):
"""
Returns the time in the Recipe at Machine with name key.
"""
assert type(key) == str or type(key) == unicode
for r in self.recipe:
if r[0] == key:
return r[1]
return None
def __setitem__(self, key, value):
"""
Adds a Recipe item (a tuple of (str machineName, int time)) to the
Recipe list (or dictionary). It will not add the item if machineName
is already in the list.
"""
assert type(key) == str or type(key) == unicode
assert type(value) == int
if self.__getitem__(key) == None:
self.recipe.append([key, value])
else:
for i, r in enumerate(self.recipe):
if r[0] == key:
del self.recipe[i]
self.recipe.insert(i, [key, value])
return
@staticmethod
def fromXml(element):
"""
A static method that creates a Recipe instance from an XML tree node
and returns it.
"""
recipe = Recipe()
for e in element.getElementsByTagName("machine"):
recipe[e.getAttribute("name").lower()] = int(e.getAttribute("time"))
return recipe
class Order(object):
"""
This class provides an Order with id, deadline and recipe.
"""
def __init__(self, id = 0, deadline = 0, currentMachine = ""):
"""
id is a unique int for the Order.
deadline is the int deadline for the Order.
recipe is the Recipe instance for the order.
"""
assert deadline >= 0
assert id >= 0
self.id = id
self.deadline = deadline
self.recipe = None
self.currentMachine = currentMachine
def __eq__(self, o):
return o.id == self.id
def __repr__(self):
return "Order " + str(self.id)
@staticmethod
def fromXml(element, plant):
"""
A static method that creates an Order instance from an XML tree node
and returns it. The number of children (Recipe instances) of the node
has to be exactly one, since each Order can only have a single Recipe.
"""
assert len(element.getElementsByTagName("recipe")) == 1
order = Order(
deadline = int(element.getAttribute("deadline")),
id = int(element.getAttribute("id")),
currentMachine = element.getAttribute("current_machine").lower()
)
order.recipe = Recipe.fromXml(element.getElementsByTagName("recipe")[0])
if order.currentMachine != "":
order.recipe[order.currentMachine] = \
abs(int(element.getAttribute("current_overtime")))
return order
class OrderList(object):
"""
This class provides a list of Order instances.
"""
def __init__(self):
"""
orders is a list of Order instances.
"""
self.orders = []
@staticmethod
def fromXml(xmlDoc, plant):
"""
A static method that creates an OrderList instance from an XML tree
node and returns it.
"""
orderList = OrderList()
for e in xmlDoc.getElementsByTagName("order"):
orderList.addOrder(Order.fromXml(e, plant))
return orderList
@staticmethod
def fromXmlFile(filename, plant):
"""
A static method that loads an OrderList from a file (str filename) and
returns an instance.
"""
file = open(filename, "r")
doc = minidom.parse(file)
orderList = OrderList.fromXml(doc, plant)
file.close()
return orderList
def addOrder(self, order):
"""
Adds an Order to the OrderList. The Order instance and the Order.id
cannot be already in the list.
"""
assert order not in self.orders
for o in self.orders:
if o.id == order.id:
raise Exception("Order id already in order list")
self.orders.append(order)
def orderFromID(self, id):
for order in self.orders:
if order.id == id:
return order
return None
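# Hedged usage sketch (editor's addition; "orders.xml" and `plant` are assumed
# inputs and are not part of the original file):
#   orderList = OrderList.fromXmlFile("orders.xml", plant)
#   for order in orderList.orders:
#       print order.id, order.deadline, order.recipe.calcMinProcTime(plant)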
| 26.213115
| 76
| 0.682927
|
2ef50b30ecd92646fdd2872b0d0db5c492ad9ad2
| 18,993
|
py
|
Python
|
topfarm/constraint_components/boundary_component.py
|
DTUWindEnergy/TopFarm2
|
cba70b20431f7a828370447117fe2e7533edf7c2
|
[
"MIT"
] | 4
|
2019-02-18T08:46:00.000Z
|
2021-01-28T06:35:52.000Z
|
topfarm/constraint_components/boundary_component.py
|
DTUWindEnergy/TopFarm2
|
cba70b20431f7a828370447117fe2e7533edf7c2
|
[
"MIT"
] | 1
|
2019-11-26T12:12:12.000Z
|
2019-11-26T12:12:12.000Z
|
topfarm/constraint_components/boundary_component.py
|
DTUWindEnergy/TopFarm2
|
cba70b20431f7a828370447117fe2e7533edf7c2
|
[
"MIT"
] | 8
|
2019-01-14T09:33:26.000Z
|
2021-06-30T11:56:03.000Z
|
import numpy as np
from openmdao.api import ExplicitComponent
from scipy.spatial import ConvexHull
import sys
# ==============================================================================
# This module is deprecated use topfarm.constraint_components.boundary instead
# ==============================================================================
def BoundaryComp(n_wt, xy_boundary, z_boundary=None, xy_boundary_type='convex_hull'):
if xy_boundary_type == 'polygon':
return PolygonBoundaryComp(n_wt, xy_boundary, z_boundary)
else:
return ConvexBoundaryComp(n_wt, xy_boundary, z_boundary, xy_boundary_type)
class BoundaryBaseComp(ExplicitComponent):
def __init__(self, n_wt, xy_boundary=None, z_boundary=None, **kwargs):
sys.stderr.write("%s is deprecated. Use BoundaryConstraint from topfarm.constraint_components.boundary instead\n" % self.__class__.__name__)
ExplicitComponent.__init__(self, **kwargs)
self.n_wt = n_wt
if xy_boundary is None:
self.xy_boundary = np.zeros((0, 2))
else:
self.xy_boundary = np.array(xy_boundary)
if z_boundary is None:
z_boundary = []
if len(z_boundary) > 0:
z_boundary = np.asarray(z_boundary)
assert z_boundary.shape[-1] == 2
if len(z_boundary.shape) == 1:
z_boundary = np.zeros((self.n_wt, 2)) + [z_boundary]
assert z_boundary.shape == (self.n_wt, 2)
assert np.all(z_boundary[:, 0] < z_boundary[:, 1])
self.z_boundary = z_boundary
# def setup_as_constraints(self, problem):
# if len(self.xy_boundary) > 0:
# problem.model.add_subsystem('xy_bound_comp', self, promotes=['*'])
# problem.model.add_constraint('boundaryDistances', lower=np.zeros(self.nVertices * self.n_wt))
# if len(self.z_boundary):
# problem.model.add_constraint(topfarm.z_key, lower=self.z_boundary[:, 0], upper=self.z_boundary[:, 1])
#
# def setup_as_penalty(self, problem, penalty=1e10):
# if len(self.xy_boundary) == 0 and len(self.z_boundary) == 0:
# return # no boundary or hub-height constraints
#
# if len(self.xy_boundary) > 0:
# subsystem_order = [ss.name for ss in problem.model._static_subsystems_allprocs]
# problem.model.add_subsystem('xy_bound_comp', self, promotes=['*'])
# subsystem_order.insert(subsystem_order.index('cost_comp'), 'xy_bound_comp')
# problem.model.set_order(subsystem_order)
#
# def xy_boundary_penalty(inputs):
# return -np.minimum(inputs['boundaryDistances'], 0).sum()
# else:
# def xy_boundary_penalty(inputs):
# return 0
#
# if len(self.z_boundary):
# def z_boundary_penalty(inputs):
# return -np.minimum(inputs[topfarm.z_key] - self.z_boundary[:, 0], 0).sum() + np.maximum(inputs[topfarm.z_key] - self.z_boundary[:, 1], 0).sum()
# else:
# def z_boundary_penalty(inputs):
# return 0
#
# self._cost_comp = problem.cost_comp
# self._org_setup = self._cost_comp.setup
# self._org_compute = self._cost_comp.compute
#
# def new_setup():
# self._org_setup()
# if len(self.xy_boundary) > 0:
# self._cost_comp.add_input('boundaryDistances', val=self.zeros)
#
# self._cost_comp.setup = new_setup
#
# def new_compute(inputs, outputs):
# p = xy_boundary_penalty(inputs) + z_boundary_penalty(inputs)
# if p == 0:
# self._org_compute(inputs, outputs)
# else:
# outputs['cost'] = penalty + p
# self._cost_comp.compute = new_compute
# problem._mode = 'rev'
class ConvexBoundaryComp(BoundaryBaseComp):
def __init__(self, n_wt, xy_boundary=None, z_boundary=None, xy_boundary_type='convex_hull'):
super().__init__(n_wt, xy_boundary, z_boundary)
if len(self.xy_boundary):
self.boundary_type = xy_boundary_type
self.calculate_boundary_and_normals()
self.calculate_gradients()
self.zeros = np.zeros([self.n_wt, self.nVertices])
else:
self.zeros = np.zeros([self.n_wt, 0])
def calculate_boundary_and_normals(self):
if self.boundary_type == 'convex_hull':
# find the points that actually comprise a convex hull
hull = ConvexHull(list(self.xy_boundary))
# keep only xy_vertices that actually comprise a convex hull and arrange in CCW order
self.xy_boundary = self.xy_boundary[hull.vertices]
elif self.boundary_type == 'square':
min_ = self.xy_boundary.min(0)
max_ = self.xy_boundary.max(0)
range_ = (max_ - min_)
x_c, y_c = min_ + range_ / 2
r = range_.max() / 2
self.xy_boundary = np.array([(x_c - r, y_c - r), (x_c + r, y_c - r), (x_c + r, y_c + r), (x_c - r, y_c + r)])
elif self.boundary_type == 'rectangle':
min_ = self.xy_boundary.min(0)
max_ = self.xy_boundary.max(0)
range_ = (max_ - min_)
x_c, y_c = min_ + range_ / 2
r = range_ / 2
self.xy_boundary = np.array([(x_c - r[0], y_c - r[1]), (x_c + r[0], y_c - r[1]), (x_c + r[0], y_c + r[1]), (x_c - r[0], y_c + r[1])])
else:
raise NotImplementedError("Boundary type '%s' is not implemented" % self.boundary_type)
# get the real number of xy_vertices
self.nVertices = self.xy_boundary.shape[0]
# initialize normals array
unit_normals = np.zeros([self.nVertices, 2])
# determine if point is inside or outside of each face, and distances from each face
for j in range(0, self.nVertices):
# calculate the unit normal vector of the current face (taking points CCW)
if j < self.nVertices - 1: # all but the set of point that close the shape
normal = np.array([self.xy_boundary[j + 1, 1] - self.xy_boundary[j, 1],
-(self.xy_boundary[j + 1, 0] - self.xy_boundary[j, 0])])
unit_normals[j] = normal / np.linalg.norm(normal)
else: # the set of points that close the shape
normal = np.array([self.xy_boundary[0, 1] - self.xy_boundary[j, 1],
-(self.xy_boundary[0, 0] - self.xy_boundary[j, 0])])
unit_normals[j] = normal / np.linalg.norm(normal)
self.unit_normals = unit_normals
def calculate_gradients(self):
unit_normals = self.unit_normals
# initialize array to hold distances from each point to each face
dfaceDistance_dx = np.zeros([self.n_wt * self.nVertices, self.n_wt])
dfaceDistance_dy = np.zeros([self.n_wt * self.nVertices, self.n_wt])
for i in range(0, self.n_wt):
# determine if point is inside or outside of each face, and distances from each face
for j in range(0, self.nVertices):
# define the derivative vectors from the point of interest to the first point of the face
dpa_dx = np.array([-1.0, 0.0])
dpa_dy = np.array([0.0, -1.0])
# find perpendicular distances derivatives from point to current surface (vector projection)
ddistanceVec_dx = np.vdot(dpa_dx, unit_normals[j]) * unit_normals[j]
ddistanceVec_dy = np.vdot(dpa_dy, unit_normals[j]) * unit_normals[j]
# calculate derivatives for the sign of perpendicular distances from point to current face
dfaceDistance_dx[i * self.nVertices + j, i] = np.vdot(ddistanceVec_dx, unit_normals[j])
dfaceDistance_dy[i * self.nVertices + j, i] = np.vdot(ddistanceVec_dy, unit_normals[j])
# return Jacobian dict
self.dfaceDistance_dx = dfaceDistance_dx
self.dfaceDistance_dy = dfaceDistance_dy
def calculate_distance_to_boundary(self, points):
"""
:param points: points for which to calculate the distances to the faces of the convex hull
:return face_distance: signed perpendicular distances from each point to each face; + is inside
"""
nPoints = np.array(points).shape[0]
nVertices = self.xy_boundary.shape[0]
vertices = self.xy_boundary
unit_normals = self.unit_normals
# initialize array to hold distances from each point to each face
face_distance = np.zeros([nPoints, nVertices])
# loop through points and find distances to each face
for i in range(0, nPoints):
# determine if point is inside or outside of each face, and distances from each face
for j in range(0, nVertices):
# define the vector from the point of interest to the first point of the face
pa = np.array([vertices[j, 0] - points[i, 0], vertices[j, 1] - points[i, 1]])
# find perpendicular distances from point to current surface (vector projection)
d_vec = np.vdot(pa, unit_normals[j]) * unit_normals[j]
# calculate the sign of perpendicular distances from point to current face (+ is inside, - is outside)
face_distance[i, j] = np.vdot(d_vec, unit_normals[j])
# print (face_distance)
return face_distance
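# Editor's sketch (not in the original file): for a unit-square boundary with
# CCW xy_vertices (0,0), (1,0), (1,1), (0,1), the unit normal of the bottom
# edge is (0, -1); for the interior point (0.5, 0.5) the signed distance to
# that edge is vdot((0,0) - (0.5,0.5), (0,-1)) = 0.5 > 0, i.e. + means inside.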
# def setup(self):
#
# # Explicitly size input arrays
# self.add_input(topfarm.x_key, np.zeros(self.n_wt), units='m',
# desc='x coordinates of turbines in global ref. frame')
# self.add_input(topfarm.y_key, np.zeros(self.n_wt), units='m',
# desc='y coordinates of turbines in global ref. frame')
#
# # Explicitly size output array
# # (vector with positive elements if turbines outside of hull)
# self.add_output('boundaryDistances', self.zeros,
# desc="signed perpendicular distances from each turbine to each face CCW; + is inside")
#
# self.declare_partials('boundaryDistances', [topfarm.x_key, topfarm.y_key])
# # self.declare_partials('boundaryDistances', ['boundaryVertices', 'boundaryNormals'], method='fd')
def distances(self, turbineX, turbineY):
return self.calculate_distance_to_boundary(np.array([turbineX, turbineY]).T)
def gradients(self, turbineX, turbineY):
return self.dfaceDistance_dx, self.dfaceDistance_dy
# def compute(self, inputs, outputs):
# # calculate distances from each point to each face
# outputs['boundaryDistances'] = self.distances(**inputs)
#
# def compute_partials(self, inputs, partials):
# # return Jacobian dict
# dx, dy = self.gradients(**inputs)
#
# partials['boundaryDistances', topfarm.x_key] = dx
# partials['boundaryDistances', topfarm.y_key] = dy
def move_inside(self, turbineX, turbineY, turbineZ, pad=1.1):
x, y, z = [np.asarray(xyz, dtype=np.float) for xyz in [turbineX, turbineY, turbineZ]]
dist = self.distances(turbineX, turbineY)
dx, dy = self.gradients(x, y) # independent of position
dx = dx[:self.nVertices, 0]
dy = dy[:self.nVertices, 0]
for i in np.where(dist.min(1) < 0)[0]: # loop over turbines that violate edges
# find smallest movement that where the constraints are satisfied
d = dist[i]
v = np.linspace(-np.abs(d.min()), np.abs(d.min()), 100)
X, Y = np.meshgrid(v, v)
m = np.ones_like(X)
for j in range(3):
m = np.logical_and(m, X * dx[j] + Y * dy[j] >= -dist[i][j])
index = np.argmin(X[m]**2 + Y[m]**2)
x[i] += X[m][index]
y[i] += Y[m][index]
return x, y, z
class PolygonBoundaryComp(BoundaryBaseComp):
def __init__(self, n_wt, xy_boundary=None, z_boundary=None, **kwargs):
BoundaryBaseComp.__init__(self, n_wt, xy_boundary=xy_boundary, z_boundary=z_boundary, **kwargs)
self.nTurbines = n_wt
self.zeros = np.zeros(self.nTurbines)
vertices = self.xy_boundary
self.nVertices = vertices.shape[0]
def edges_counter_clockwise(vertices):
if np.any(vertices[0] != vertices[-1]):
vertices = np.r_[vertices, vertices[:1]]
x1, y1 = vertices[:-1].T
x2, y2 = vertices[1:].T
double_area = np.sum((x1 - x2) * (y1 + y2))  # 2 x Area (+: counterclockwise)
assert double_area != 0, "Area must be non-zero"
if double_area < 0: #
return edges_counter_clockwise(vertices[::-1])
else:
return vertices[:-1], x1, y1, x2, y2
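# Editor's check (illustrative, not from the original source): for the CCW
# unit square (0,0), (1,0), (1,1), (0,1) the shoelace sum of (x1-x2)*(y1+y2)
# over the closed loop is 2, i.e. twice the area and positive, so the vertex
# order is kept; a negative sum triggers the reversed recursive call above.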
self.xy_boundary, self.x1, self.y1, self.x2, self.y2 = edges_counter_clockwise(vertices)
self.min_x, self.min_y = np.min([self.x1, self.x2], 0), np.min([self.y1, self.y2], 0)
self.max_x, self.max_y = np.max([self.x1, self.x2], 0), np.max([self.y1, self.y2], 0)
self.dx = self.x2 - self.x1
self.dy = self.y2 - self.y1
self.x2y1 = self.x2 * self.y1
self.y2x1 = self.y2 * self.x1
self.length = ((self.y2 - self.y1)**2 + (self.x2 - self.x1)**2)**0.5
self.edge_unit_vec = (np.array([self.dy, -self.dx]) / self.length)
v = np.hstack((self.edge_unit_vec, self.edge_unit_vec[:, :1]))
self.xy2_vec = v[:, :-1] + v[:, 1:]
self.xy1_vec = np.hstack((self.xy2_vec[:, -1:], self.xy2_vec[:, 1:]))
self.dEdgeDist_dx = -self.dy / self.length
self.dEdgeDist_dy = self.dx / self.length
self._cache_input = None
self._cache_output = None
# def setup(self):
#
# # Explicitly size input arrays
# self.add_input(topfarm.x_key, np.zeros(self.nTurbines), units='m',
# desc='x coordinates of turbines in global ref. frame')
# self.add_input(topfarm.y_key, np.zeros(self.nTurbines), units='m',
# desc='y coordinates of turbines in global ref. frame')
#
# # Explicitly size output array
# # (vector with positive elements if turbines outside of hull)
# self.add_output('boundaryDistances', self.zeros,
# desc="signed perpendicular distances from each turbine to each face CCW; + is inside")
#
# self.declare_partials('boundaryDistances', [topfarm.x_key, topfarm.y_key])
# # self.declare_partials('boundaryDistances', ['boundaryVertices', 'boundaryNormals'], method='fd')
def calc_distance_and_gradients(self, x, y):
"""
distances of point (x,y) to edge ((x1,y1)->(x2,y2))
+/-: inside/outside
case (x,y) closest to edge:
distances = edge_unit_vec dot (x1-x, y1-y)
ddist_dx = -(y2-y1)/|edge|
ddist_dy = (x2-x1)/|edge|
case (x,y) closest to (x1,y1) (or (x2,y2)):
sign = sign of distances to nearest edge
distances = sign * ((x1-x)^2 + (y1-y)^2)^0.5
ddist_dx = sign * (2*x - 2*x1) / (2 * distances)
ddist_dy = sign * (2*y - 2*y1) / (2 * distances)
"""
if np.all(np.array([x, y]) == self._cache_input):
return self._cache_output
X, Y = [np.tile(xy, (len(self.x1), 1)).T for xy in [x, y]] # dim = (ntb, nEdges)
X1, Y1, X2, Y2, ddist_dX, ddist_dY = [np.tile(xy, (len(x), 1))
for xy in [self.x1, self.y1, self.x2, self.y2, self.dEdgeDist_dx, self.dEdgeDist_dy]]
# perpendicular distances to edge (dot product)
d12 = (self.x1 - X) * self.edge_unit_vec[0] + (self.y1 - Y) * self.edge_unit_vec[1]
# nearest point on edge
px = X + d12 * self.edge_unit_vec[0]
py = Y + d12 * self.edge_unit_vec[1]
# distances to start and end points
d1 = np.sqrt((self.x1 - X)**2 + (self.y1 - Y)**2)
d2 = np.sqrt((self.x2 - X)**2 + (self.y2 - Y)**2)
# use start or end point if nearest point is outside edge
use_xy1 = (((self.dx != 0) & (px < self.x1) & (self.x1 < self.x2)) |
((self.dx != 0) & (px > self.x1) & (self.x1 > self.x2)) |
((self.dx == 0) & (py < self.y1) & (self.y1 < self.y2)) |
((self.dx == 0) & (py > self.y1) & (self.y1 > self.y2)))
use_xy2 = (((self.dx != 0) & (px > self.x2) & (self.x2 > self.x1)) |
((self.dx != 0) & (px < self.x2) & (self.x2 < self.x1)) |
((self.dx == 0) & (py > self.y2) & (self.y2 > self.y1)) |
((self.dx == 0) & (py < self.y2) & (self.y2 < self.y1)))
px[use_xy1] = X1[use_xy1]
py[use_xy1] = Y1[use_xy1]
px[use_xy2] = X2[use_xy2]
py[use_xy2] = Y2[use_xy2]
distance = d12.copy()
v = (px[use_xy1] - X[use_xy1]) * self.xy1_vec[0, np.where(use_xy1)[1]] + (py[use_xy1] - Y[use_xy1]) * self.xy1_vec[1, np.where(use_xy1)[1]]
sign_use_xy1 = np.choose(v >= 0, [-1, 1])
v = (px[use_xy2] - X[use_xy2]) * self.xy2_vec[0, np.where(use_xy2)[1]] + (py[use_xy2] - Y[use_xy2]) * self.xy2_vec[1, np.where(use_xy2)[1]]
sign_use_xy2 = np.choose(v >= 0, [-1, 1])
d12[use_xy2]
d12[:, 1:][use_xy2[:, :-1]]
distance[use_xy1] = sign_use_xy1 * d1[use_xy1]
distance[use_xy2] = sign_use_xy2 * d2[use_xy2]
length = np.sqrt((X1[use_xy1] - X[use_xy1])**2 + (Y1[use_xy1] - Y[use_xy1])**2)
ddist_dX[use_xy1] = sign_use_xy1 * (2 * X[use_xy1] - 2 * X1[use_xy1]) / (2 * length)
ddist_dY[use_xy1] = sign_use_xy1 * (2 * Y[use_xy1] - 2 * Y1[use_xy1]) / (2 * length)
length = np.sqrt((X2[use_xy2] - X[use_xy2])**2 + (Y2[use_xy2] - Y[use_xy2])**2)
ddist_dX[use_xy2] = sign_use_xy2 * (2 * X[use_xy2] - 2 * X2[use_xy2]) / (2 * length)
ddist_dY[use_xy2] = sign_use_xy2 * (2 * Y[use_xy2] - 2 * Y2[use_xy2]) / (2 * length)
closest_edge_index = np.argmin(np.abs(distance), 1)
self._cache_input = np.array([x, y])
self._cache_output = [np.choose(closest_edge_index, v.T) for v in [distance, ddist_dX, ddist_dY]]
return self._cache_output
def distances(self, turbineX, turbineY):
return self.calc_distance_and_gradients(turbineX, turbineY)[0]
def gradients(self, turbineX, turbineY):
_, dx, dy = self.calc_distance_and_gradients(turbineX, turbineY)
return np.diagflat(dx), np.diagflat(dy)
def move_inside(self, turbineX, turbineY, turbineZ, pad=1.1):
x, y, z = [np.asarray(xyz, dtype=np.float) for xyz in [turbineX, turbineY, turbineZ]]
dist = self.distances(turbineX, turbineY)
dx, dy = map(np.diag, self.gradients(x, y))
m = dist < 0
x[m] -= dx[m] * dist[m] * pad
y[m] -= dy[m] * dist[m] * pad
return x, y, z
| 47.4825
| 161
| 0.584847
|
ce581d40b1007c6a495da34c895bbb381677a2f9
| 4,443
|
py
|
Python
|
infer.py
|
rowantseng/FUnIE-GAN-PyTorch
|
d2c8064c7827001de3f4b7e71ae5b4fa2eff040c
|
[
"MIT"
] | 6
|
2021-07-27T09:03:41.000Z
|
2022-01-01T05:17:27.000Z
|
infer.py
|
rowantseng/FUnIE-GAN-PyTorch
|
d2c8064c7827001de3f4b7e71ae5b4fa2eff040c
|
[
"MIT"
] | null | null | null |
infer.py
|
rowantseng/FUnIE-GAN-PyTorch
|
d2c8064c7827001de3f4b7e71ae5b4fa2eff040c
|
[
"MIT"
] | 1
|
2020-08-25T23:17:06.000Z
|
2020-08-25T23:17:06.000Z
|
import argparse
import os
import time
import numpy as np
import torch
from datasets import TestDataset, denorm
from models import FUnIEGeneratorV1, FUnIEGeneratorV2, FUnIEUpGenerator
from torchvision import transforms
from utils import AverageMeter, ProgressMeter
class Predictor(object):
def __init__(self, model, test_loader, model_path, save_path, is_cuda):
self.test_loader = test_loader
self.save_path = save_path
os.makedirs(self.save_path, exist_ok=True)
self.is_cuda = is_cuda
self.print_freq = 20
# Load model weights
self.model = model
if not os.path.isfile(model_path):
raise FileNotFoundError(f"Model file '{model_path}' not found!")
self.load(model_path)
if self.is_cuda:
self.model.cuda()
def predict(self):
self.model.eval()
batch_time = AverageMeter("Time", "3.3f")
progress = ProgressMeter(len(self.test_loader), [
batch_time], prefix="Test: ")
with torch.no_grad():
end = time.time()
for batch_idx, (paths, images) in enumerate(self.test_loader):
bs = images.size(0)
if self.is_cuda:
images = images.cuda()
fake_images = self.model(images)
fake_images = denorm(fake_images.data)
fake_images = torch.clamp(fake_images, min=0., max=255.)
fake_images = fake_images.type(torch.uint8)
for idx in range(bs):
name = os.path.splitext(os.path.basename(paths[idx]))[0]
fake_image = fake_images[idx]
fake_image = transforms.ToPILImage()(fake_image).convert("RGB")
fake_image.save(f"{self.save_path}/{name}.png")
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % self.print_freq == 0:
progress.display(batch_idx)
return
def load(self, model):
device = "cuda:0" if self.is_cuda else "cpu"
ckpt = torch.load(model, map_location=device)
self.model.load_state_dict(ckpt["state_dict"])
print(f"At epoch: {ckpt['epoch']} (loss={ckpt['best_loss']:.3f})")
print(f">>> Load generator from {model}")
if __name__ == "__main__":
# Set seed
np.random.seed(77)
torch.manual_seed(77)
is_cuda = torch.cuda.is_available()
if is_cuda:
torch.cuda.manual_seed(77)
model_names = ["v1", "v2", "unpair"]
model_archs = [FUnIEGeneratorV1, FUnIEGeneratorV2, FUnIEUpGenerator]
model_mapper = {m: net for m, net in zip(model_names, model_archs)}
parser = argparse.ArgumentParser(description="PyTorch FUnIE-GAN Inference")
parser.add_argument("-d", "--data", default="", type=str, metavar="PATH",
help="path to data (default: none)")
parser.add_argument("-a", "--arch", metavar="ARCH", default="v1",
choices=model_names,
help="model architecture: " +
" | ".join(model_names) +
" (default: v1)")
parser.add_argument("-j", "--workers", default=4, type=int, metavar="N",
help="number of data loading workers (default: 4)")
parser.add_argument("-b", "--batch-size", default=256, type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel")
parser.add_argument("-m", "--model", default="", type=str, metavar="PATH",
help="path to generator checkpoint (default: none)")
parser.add_argument("--save-path", default="", type=str, metavar="PATH",
help="path to save results (default: none)")
args = parser.parse_args()
# Build data loader
test_set = TestDataset(args.data, (256, 256))
test_loader = torch.utils.data.DataLoader(
test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
# Create predictor
net = model_mapper[args.arch]()
predictor = Predictor(net, test_loader, args.model,
args.save_path, is_cuda)
predictor.predict()
| 37.974359
| 86
| 0.58429
|
408429d06772254e75b32a35b2b019e001519176
| 250
|
py
|
Python
|
if/python/c_func.py
|
yabaud/libharu
|
d84867ebf9f3de6afd661d2cdaff102457fbc371
|
[
"Zlib"
] | 1,118
|
2015-01-09T10:40:33.000Z
|
2022-03-28T08:19:56.000Z
|
if/python/c_func.py
|
yabaud/libharu
|
d84867ebf9f3de6afd661d2cdaff102457fbc371
|
[
"Zlib"
] | 174
|
2015-01-28T18:41:32.000Z
|
2022-03-31T16:51:05.000Z
|
if/python/c_func.py
|
yabaud/libharu
|
d84867ebf9f3de6afd661d2cdaff102457fbc371
|
[
"Zlib"
] | 394
|
2015-01-23T17:06:52.000Z
|
2022-03-25T03:50:06.000Z
|
def printf(format, *optional):
#print 'format="%s"' % format
#print 'optional="%s"' % optional
if len(optional)==1:
optional=optional[0]
if format.endswith('\n'):
format=format[:-1]
print format % optional
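# Editor's usage sketch (not part of the original file):
#   printf('%d + %d = %d\n', 1, 2, 3)   # prints: 1 + 2 = 3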
| 27.777778
| 38
| 0.572
|
e5d29e80d9b31d79f50b94aebdd80840d24e2b53
| 1,931
|
py
|
Python
|
utils2devops/aws/network_acl.py
|
alainivars/utils2devops
|
ff7deb2bc315d18b693d53ae6472da5163a3260e
|
[
"Apache-2.0"
] | 2
|
2019-06-29T20:09:37.000Z
|
2020-12-09T22:38:57.000Z
|
utils2devops/aws/network_acl.py
|
alainivars/utils2devops
|
ff7deb2bc315d18b693d53ae6472da5163a3260e
|
[
"Apache-2.0"
] | null | null | null |
utils2devops/aws/network_acl.py
|
alainivars/utils2devops
|
ff7deb2bc315d18b693d53ae6472da5163a3260e
|
[
"Apache-2.0"
] | null | null | null |
import boto3
from utils2devops.aws import NetworkAcl, Gress
"""
AWS configuration files should be present:
~/.aws/credentials
~/.aws/config
"""
def list_network_acls(
profile_name: str = 'terraform',
region_name: str = 'us-east-1'
) -> [str]:
"""This function list all AWS network ACL how can access the profile
profile_name in the AWS region region_name.
:param profile_name: The AWS profile name to use.
:param region_name: The AWS region to use.
:returns: list of NetworkAcl objects, or an empty list if none are found.
:raises: AttributeError, KeyError
"""
session = boto3.Session(profile_name=profile_name)
client = session.client(service_name='ec2', region_name=region_name)
elements = client.describe_network_acls()
_lines = []
if 'NetworkAcls' not in elements:
return _lines
for element in elements['NetworkAcls']:
x = NetworkAcl(element['NetworkAclId'])
x.vpc_id = element['VpcId']
x.subnet_ids = []
for p in element['Associations']:
x.subnet_ids.append(p['SubnetId'])
for p in element['Entries']:
if p['RuleNumber'] != 32767:
gress = Gress()
gress.rule_no = p['RuleNumber']
gress.action = p['RuleAction']
gress.cidr_block = p['CidrBlock']
gress.protocol = p['Protocol']
if 'PortRange' in p:
gress.from_port = p['PortRange']['From']
gress.to_port = p['PortRange']['To']
else:
gress.from_port = 0
gress.to_port = 0
if p['Egress']:
x.egress.append(gress)
else:
x.ingress.append(gress)
x.tags = element['Tags']
_lines.append(x)
return _lines
if __name__ == '__main__':
lines = list_network_acls()
print(*lines)
| 30.171875
| 72
| 0.570171
|
5b56e08726cbac0ee70886744d44ff7870b00685
| 10,276
|
py
|
Python
|
uncoverml/config.py
|
jesserobertson/uncover-ml
|
7e6506880c8e61d641fc7c0badb727b4540071e2
|
[
"Apache-2.0"
] | null | null | null |
uncoverml/config.py
|
jesserobertson/uncover-ml
|
7e6506880c8e61d641fc7c0badb727b4540071e2
|
[
"Apache-2.0"
] | null | null | null |
uncoverml/config.py
|
jesserobertson/uncover-ml
|
7e6506880c8e61d641fc7c0badb727b4540071e2
|
[
"Apache-2.0"
] | 1
|
2019-04-05T23:08:29.000Z
|
2019-04-05T23:08:29.000Z
|
import logging
from os import path
from os import makedirs
import glob
import csv
import yaml
from uncoverml import transforms
log = logging.getLogger(__name__)
"""The strings associated with each imputation option
"""
_imputers = {'mean': transforms.MeanImputer,
'gaus': transforms.GaussImputer,
'nn': transforms.NearestNeighboursImputer}
"""These transforms operate individually on each image before concatenation
"""
_image_transforms = {'onehot': transforms.OneHotTransform,
'randomhot': transforms.RandomHotTransform}
"""Post-concatenation transforms: operate on whole data vector
"""
_global_transforms = {'centre': transforms.CentreTransform,
'standardise': transforms.StandardiseTransform,
'whiten': transforms.WhitenTransform}
def _parse_transform_set(transform_dict, imputer_string, n_images=None):
"""Parse a dictionary read from yaml into a TransformSet object
Parameters
----------
transform_dict : dictionary
The dictionary as read from the yaml config file containing config
key-value pairs
imputer_string : string
The name of the imputer (could be None)
n_images : int > 0
The number of images being read in. Required because we need to create
a new image transform for each image
Returns
-------
image_transforms : list
A list of image Transform objects
imputer : Imputer
An Imputer object
global_transforms : list
A list of global Transform objects
"""
image_transforms = []
global_transforms = []
if imputer_string in _imputers:
imputer = _imputers[imputer_string]()
else:
imputer = None
if transform_dict is not None:
for t in transform_dict:
if type(t) is str:
t = {t: {}}
key, params = list(t.items())[0]
if key in _image_transforms:
image_transforms.append([_image_transforms[key](**params)
for k in range(n_images)])
elif key in _global_transforms:
global_transforms.append(_global_transforms[key](**params))
return image_transforms, imputer, global_transforms
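# Editor's illustration (hypothetical input, not from the original source):
# for transform_dict == ['onehot', {'whiten': {}}], imputer_string == 'mean'
# and n_images == 2, this returns one image-transform group holding two
# OneHotTransform instances (one per image), a MeanImputer instance, and a
# single global WhitenTransform.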
class FeatureSetConfig:
"""Config class representing a 'feature set' in the config file
Parameters
----------
d : dictionary
The section of the yaml file for a feature set
"""
def __init__(self, d):
self.name = d['name']
self.type = d['type']
if d['type'] not in {'ordinal', 'categorical'}:
log.warning("Feature set type must be ordinal or categorical: "
"Unknown option "
"{} (assuming ordinal)".format(d['type']))
is_categorical = d['type'] == 'categorical'
# get list of all the files
files = []
for source in d['files']:
key = next(iter(source.keys()))
if key == 'path':
files.append(path.abspath(source[key]))
elif key == 'directory':
glob_string = path.join(path.abspath(source[key]), "*.tif")
f_list = glob.glob(glob_string)
files.extend(f_list)
elif key == 'list':
csvfile = path.abspath(source[key])
with open(csvfile, 'r') as f:
reader = csv.reader(f)
tifs = list(reader)
tifs = [f[0].strip() for f in tifs
if (len(f) > 0 and f[0].strip() and
f[0].strip()[0] != '#')]
for f in tifs:
files.append(path.abspath(f))
self.files = sorted(files, key=str.lower)
n_files = len(self.files)
trans_i, im, trans_g = _parse_transform_set(d['transforms'],
d['imputation'],
n_files)
self.transform_set = transforms.ImageTransformSet(trans_i, im, trans_g,
is_categorical)
class Config:
"""Class representing the global configuration of the uncoverml scripts
This class is *mostly* read-only, but it does also contain the Transform
objects which have state. TODO: separate these out!
Parameters
----------
yaml_file : string
The path to the yaml config file. For details on the yaml schema
see the uncoverml documentation
"""
def __init__(self, yaml_file):
with open(yaml_file, 'r') as f:
s = yaml.load(f)
self.name = path.basename(yaml_file).rsplit(".", 1)[0]
# TODO expose this option when fixed
if 'patchsize' in s:
log.info("Patchsize currently fixed at 0 -- ignoring")
self.patchsize = 0
self.algorithm = s['learning']['algorithm']
self.cubist = self.algorithm == 'cubist'
self.multicubist = self.algorithm == 'multicubist'
self.multirandomforest = self.algorithm == 'multirandomforest'
self.krige = self.algorithm == 'krige'
self.algorithm_args = s['learning']['arguments']
self.quantiles = s['prediction']['quantiles']
self.outbands = None
if 'outbands' in s['prediction']:
self.outbands = s['prediction']['outbands']
self.thumbnails = s['prediction']['thumbnails'] \
if 'thumbnails' in s['prediction'] else None
self.pickle = any(True for d in s['features'] if d['type'] == 'pickle')
self.rawcovariates = False
if self.pickle:
self.pickle_load = True
for n, d in enumerate(s['features']):
if d['type'] == 'pickle':
self.pickled_covariates = \
path.abspath(d['files']['covariates'])
self.pickled_targets = d['files']['targets']
if 'rawcovariates' in d['files']:
self.rawcovariates = d['files']['rawcovariates']
self.rawcovariates_mask = \
d['files']['rawcovariates_mask']
if not (path.exists(d['files']['covariates'])
and path.exists(d['files']['targets'])):
self.pickle_load = False
if self.cubist or self.multicubist:
self.featurevec = \
path.abspath(d['files']['featurevec'])
if not path.exists(d['files']['featurevec']):
self.pickle_load = False
if 'plot_covariates' in d['files']:
self.plot_covariates = d['files']['plot_covariates']
else:
self.plot_covariates = False
s['features'].pop(n)
else:
self.pickle_load = False
if not self.pickle_load:
log.info('One or both pickled files were not '
'found. All targets will be intersected.')
self.feature_sets = [FeatureSetConfig(k) for k in s['features']]
if 'preprocessing' in s:
final_transform = s['preprocessing']
_, im, trans_g = _parse_transform_set(
final_transform['transforms'], final_transform['imputation'])
self.final_transform = transforms.TransformSet(im, trans_g)
else:
self.final_transform = None
self.target_file = s['targets']['file']
self.target_property = s['targets']['property']
self.resample = None
if 'resample' in s['targets']:
self.resample = s['targets']['resample']
self.mask = None
if 'mask' in s:
self.mask = s['mask']['file']
self.retain = s['mask']['retain'] # mask areas that are predicted
self.lon_lat = False
if 'lon_lat' in s:
self.lon_lat = True
self.lat = s['lon_lat']['lat']
self.lon = s['lon_lat']['lon']
# TODO pipeline this better
self.rank_features = False
self.cross_validate = False
self.parallel_validate = False
if s['validation']:
for i in s['validation']:
if i == 'feature_rank':
self.rank_features = True
if i == 'parallel':
self.parallel_validate = True
if type(i) is dict and 'k-fold' in i:
self.cross_validate = True
self.folds = i['k-fold']['folds']
self.crossval_seed = i['k-fold']['random_seed']
break
if self.rank_features and self.pickle_load:
self.pickle_load = False
log.info('Feature ranking does not work with '
'pickled files. Pickled files will not be used. '
'All covariates will be intersected.')
self.output_dir = s['output']['directory']
# create output dir if does not exist
makedirs(self.output_dir, exist_ok=True)
if 'optimisation' in s:
self.optimisation = s['optimisation']
if 'optimisation_output' in self.optimisation:
self.optimisation_output = \
self.optimisation['optimisation_output']
self.cluster_analysis = False
if 'clustering' in s:
self.clustering_algorithm = s['clustering']['algorithm']
cluster_args = s['clustering']['arguments']
self.n_classes = cluster_args['n_classes']
self.oversample_factor = cluster_args['oversample_factor']
if 'file' in s['clustering'] and s['clustering']['file']:
self.semi_supervised = True
self.class_file = s['clustering']['file']
self.class_property = s['clustering']['property']
else:
self.semi_supervised = False
if 'cluster_analysis' in s['clustering']:
self.cluster_analysis = s['clustering']['cluster_analysis']
class ConfigException(Exception):
pass
| 38.343284
| 79
| 0.550701
|
077e5b6ead24a285f909ef118dd4027bbc170952
| 1,860
|
py
|
Python
|
iom/migrations/0009_auto_20150624_2348.py
|
acaciawater/iom
|
66ae7cb4134221ba03005b6e5d7b3f158dbc7550
|
[
"Apache-2.0"
] | null | null | null |
iom/migrations/0009_auto_20150624_2348.py
|
acaciawater/iom
|
66ae7cb4134221ba03005b6e5d7b3f158dbc7550
|
[
"Apache-2.0"
] | null | null | null |
iom/migrations/0009_auto_20150624_2348.py
|
acaciawater/iom
|
66ae7cb4134221ba03005b6e5d7b3f158dbc7550
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('iom', '0008_auto_20150624_2343'),
]
operations = [
migrations.CreateModel(
name='Waarnemer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('initialen', models.CharField(max_length=6)),
('voornaam', models.CharField(max_length=20, null=True, blank=True)),
('achternaam', models.CharField(max_length=40)),
('telefoon', models.CharField(blank=True, max_length=16, validators=[django.core.validators.RegexValidator(regex=b'^(?:\\+)?[0-9\\-]{10,11}$', message=b'Ongeldig telefoonnummer')])),
('email', models.EmailField(max_length=254, blank=True)),
('adres', models.ForeignKey(blank=True, to='iom.Adres', null=True)),
('organisatie', models.ForeignKey(blank=True, to='iom.Organisatie', null=True)),
],
options={
'verbose_name_plural': 'Waarnemers',
},
),
migrations.RemoveField(
model_name='eigenaar',
name='adres',
),
migrations.RemoveField(
model_name='eigenaar',
name='organisatie',
),
migrations.RemoveField(
model_name='meetpunt',
name='eigenaar',
),
migrations.DeleteModel(
name='Eigenaar',
),
migrations.AddField(
model_name='meetpunt',
name='waarnemer',
field=models.ForeignKey(default=1, to='iom.Waarnemer'),
preserve_default=False,
),
]
| 35.09434
| 198
| 0.560753
|
6349bb8ec11698ce1d6144412289b36db22a8cb1
| 117
|
py
|
Python
|
exercicios-Python/teste-01.py
|
pedrosimoes-programmer/exercicios-python
|
150de037496d63d76086678d87425a8ccfc74573
|
[
"MIT"
] | null | null | null |
exercicios-Python/teste-01.py
|
pedrosimoes-programmer/exercicios-python
|
150de037496d63d76086678d87425a8ccfc74573
|
[
"MIT"
] | null | null | null |
exercicios-Python/teste-01.py
|
pedrosimoes-programmer/exercicios-python
|
150de037496d63d76086678d87425a8ccfc74573
|
[
"MIT"
] | null | null | null |
nome = input('What is your name? ');
print('Welcome, ' , nome , ' , we are very happy to have you here!');
| 39
| 79
| 0.598291
|
ae350ce16ce8640385026ea4eb25c241df85315d
| 458
|
py
|
Python
|
reference_manual_test.py
|
clean-code-craft-tcq-2/well-named-in-py-SanjaySaatyaki
|
08b343a7b401f294d2b4e161212256d1e48666d3
|
[
"MIT"
] | null | null | null |
reference_manual_test.py
|
clean-code-craft-tcq-2/well-named-in-py-SanjaySaatyaki
|
08b343a7b401f294d2b4e161212256d1e48666d3
|
[
"MIT"
] | null | null | null |
reference_manual_test.py
|
clean-code-craft-tcq-2/well-named-in-py-SanjaySaatyaki
|
08b343a7b401f294d2b4e161212256d1e48666d3
|
[
"MIT"
] | null | null | null |
import reference_manual as manual
import even_count_color_generator as generator
def test_get_all_color_codes():
contents = manual.get_all_color_codes()
for content in contents:
pairNumber = content.split(' ')[0].strip()
color_code = content.split(' ')[1:-1]
major_color, minor_color = generator.get_color_from_pair_number(int(pairNumber))
assert( major_color in color_code)
assert( minor_color in color_code)
| 41.636364
| 88
| 0.727074
|
c5ae3a56e64e4529136d5912d32600637f06223a
| 417
|
py
|
Python
|
base/migrations/0006_profile_history.py
|
polarity-cf/arugo
|
530ea6092702916d63f36308d5a615d118b73850
|
[
"MIT"
] | 34
|
2021-11-11T14:00:15.000Z
|
2022-03-16T12:30:04.000Z
|
base/migrations/0006_profile_history.py
|
polarity-cf/arugo
|
530ea6092702916d63f36308d5a615d118b73850
|
[
"MIT"
] | 22
|
2021-11-11T23:18:14.000Z
|
2022-03-31T15:07:02.000Z
|
base/migrations/0006_profile_history.py
|
polarity-cf/arugo
|
530ea6092702916d63f36308d5a615d118b73850
|
[
"MIT"
] | 1
|
2022-03-14T07:35:09.000Z
|
2022-03-14T07:35:09.000Z
|
# Generated by Django 3.2.9 on 2021-11-13 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0005_authquery_password'),
]
operations = [
migrations.AddField(
model_name='profile',
name='history',
field=models.CharField(default='[]', max_length=1000),
),
]
| 21.947368
| 67
| 0.568345
|
9d41f5985326b0ca644ae1ac193294d8f48089b0
| 561
|
py
|
Python
|
imagepy/menus/File/save_plg.py
|
pengguanjun/imagepy
|
d96ef98c2c3e93d368131fd2753bce164e1247cd
|
[
"BSD-4-Clause"
] | 1
|
2020-08-17T04:18:35.000Z
|
2020-08-17T04:18:35.000Z
|
imagepy/menus/File/save_plg.py
|
pengguanjun/imagepy
|
d96ef98c2c3e93d368131fd2753bce164e1247cd
|
[
"BSD-4-Clause"
] | null | null | null |
imagepy/menus/File/save_plg.py
|
pengguanjun/imagepy
|
d96ef98c2c3e93d368131fd2753bce164e1247cd
|
[
"BSD-4-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 5 03:19:13 2016
@author: yxl
"""
from imagepy.core.engine import dataio
from sciapp import Source
from imagepy.core.engine import Simple
class SaveImage(dataio.ImageWriter):
title = 'Save'
def load(self, ips):
self.filt = [i for i in sorted(Source.manager('writer').names())]
return True
class WindowCapture(dataio.ImageWriter):
title = 'Save With Mark'
filt = ['PNG']
def run(self, ips, imgs, para = None):
self.app.get_img_win().canvas.save_buffer(para['path'])
plgs = [SaveImage, WindowCapture]
| 23.375
| 67
| 0.7041
|
287e1a99789c8046689388f0dbd48a3e8e433e73
| 48,530
|
py
|
Python
|
octodns/record/__init__.py
|
mamercad/octodns
|
9441a51168e7319749057dead758a14e97cde5a2
|
[
"MIT"
] | null | null | null |
octodns/record/__init__.py
|
mamercad/octodns
|
9441a51168e7319749057dead758a14e97cde5a2
|
[
"MIT"
] | null | null | null |
octodns/record/__init__.py
|
mamercad/octodns
|
9441a51168e7319749057dead758a14e97cde5a2
|
[
"MIT"
] | null | null | null |
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from ipaddress import IPv4Address, IPv6Address
from logging import getLogger
import re
from fqdn import FQDN
from ..equality import EqualityTupleMixin
from .geo import GeoCodes
class Change(object):
def __init__(self, existing, new):
self.existing = existing
self.new = new
@property
def record(self):
'Returns new if we have one, existing otherwise'
return self.new or self.existing
def __lt__(self, other):
self_record = self.record
other_record = other.record
return ((self_record.name, self_record._type) <
(other_record.name, other_record._type))
class Create(Change):
def __init__(self, new):
super(Create, self).__init__(None, new)
def __repr__(self, leader=''):
source = self.new.source.id if self.new.source else ''
return f'Create {self.new} ({source})'
class Update(Change):
# Leader is just to allow us to work around heven eating leading whitespace
# in our output. When we call this from the Manager.sync plan summary
# section we'll pass in a leader, otherwise we'll just let it default and
# do nothing
def __repr__(self, leader=''):
source = self.new.source.id if self.new.source else ''
return f'Update\n{leader} {self.existing} ->\n' \
f'{leader} {self.new} ({source})'
class Delete(Change):
def __init__(self, existing):
super(Delete, self).__init__(existing, None)
def __repr__(self, leader=''):
return f'Delete {self.existing}'
class ValidationError(Exception):
@classmethod
def build_message(cls, fqdn, reasons):
reasons = '\n - '.join(reasons)
return f'Invalid record {fqdn}\n - {reasons}'
def __init__(self, fqdn, reasons):
super(Exception, self).__init__(self.build_message(fqdn, reasons))
self.fqdn = fqdn
self.reasons = reasons
class Record(EqualityTupleMixin):
log = getLogger('Record')
@classmethod
def new(cls, zone, name, data, source=None, lenient=False):
name = str(name)
fqdn = f'{name}.{zone.name}' if name else zone.name
try:
_type = data['type']
except KeyError:
raise Exception(f'Invalid record {fqdn}, missing type')
try:
_class = {
'A': ARecord,
'AAAA': AaaaRecord,
'ALIAS': AliasRecord,
'CAA': CaaRecord,
'CNAME': CnameRecord,
'DNAME': DnameRecord,
'LOC': LocRecord,
'MX': MxRecord,
'NAPTR': NaptrRecord,
'NS': NsRecord,
'PTR': PtrRecord,
'SPF': SpfRecord,
'SRV': SrvRecord,
'SSHFP': SshfpRecord,
'TXT': TxtRecord,
'URLFWD': UrlfwdRecord,
}[_type]
except KeyError:
raise Exception(f'Unknown record type: "{_type}"')
reasons = _class.validate(name, fqdn, data)
try:
lenient |= data['octodns']['lenient']
except KeyError:
pass
if reasons:
if lenient:
cls.log.warning(ValidationError.build_message(fqdn, reasons))
else:
raise ValidationError(fqdn, reasons)
return _class(zone, name, data, source=source)
@classmethod
def validate(cls, name, fqdn, data):
reasons = []
if name == '@':
reasons.append('invalid name "@", use "" instead')
n = len(fqdn)
if n > 253:
reasons.append(f'invalid fqdn, "{fqdn}" is too long at {n} '
'chars, max is 253')
for label in name.split('.'):
n = len(label)
if n > 63:
reasons.append(f'invalid label, "{label}" is too long at {n}'
' chars, max is 63')
try:
ttl = int(data['ttl'])
if ttl < 0:
reasons.append('invalid ttl')
except KeyError:
reasons.append('missing ttl')
try:
if data['octodns']['healthcheck']['protocol'] \
not in ('HTTP', 'HTTPS', 'TCP'):
reasons.append('invalid healthcheck protocol')
except KeyError:
pass
return reasons
def __init__(self, zone, name, data, source=None):
self.log.debug('__init__: zone.name=%s, type=%11s, name=%s', zone.name,
self.__class__.__name__, name)
self.zone = zone
# force everything lower-case just to be safe
self.name = str(name).lower() if name else name
self.source = source
self.ttl = int(data['ttl'])
self._octodns = data.get('octodns', {})
def _data(self):
return {'ttl': self.ttl}
@property
def data(self):
return self._data()
@property
def fqdn(self):
if self.name:
return f'{self.name}.{self.zone.name}'
return self.zone.name
@property
def ignored(self):
return self._octodns.get('ignored', False)
@property
def excluded(self):
return self._octodns.get('excluded', [])
@property
def included(self):
return self._octodns.get('included', [])
def healthcheck_host(self, value=None):
healthcheck = self._octodns.get('healthcheck', {})
if healthcheck.get('protocol', None) == 'TCP':
return None
return healthcheck.get('host', self.fqdn[:-1]) or value
@property
def healthcheck_path(self):
healthcheck = self._octodns.get('healthcheck', {})
if healthcheck.get('protocol', None) == 'TCP':
return None
try:
return healthcheck['path']
except KeyError:
return '/_dns'
@property
def healthcheck_protocol(self):
try:
return self._octodns['healthcheck']['protocol']
except KeyError:
return 'HTTPS'
@property
def healthcheck_port(self):
try:
return int(self._octodns['healthcheck']['port'])
except KeyError:
return 443
def changes(self, other, target):
# We're assuming we have the same name and type if we're being compared
if self.ttl != other.ttl:
return Update(self, other)
def copy(self, zone=None):
data = self.data
data['type'] = self._type
data['octodns'] = self._octodns
return Record.new(
zone if zone else self.zone,
self.name,
data,
self.source,
lenient=True
)
# NOTE: we're using __hash__ and ordering methods that consider Records
# equivalent if they have the same name & _type. Values are ignored. This
# is useful when computing diffs/changes.
def __hash__(self):
return f'{self.name}:{self._type}'.__hash__()
def _equality_tuple(self):
return (self.name, self._type)
def __repr__(self):
# Make sure this is always overridden
raise NotImplementedError('Abstract base class, __repr__ required')
class GeoValue(EqualityTupleMixin):
geo_re = re.compile(r'^(?P<continent_code>\w\w)(-(?P<country_code>\w\w)'
r'(-(?P<subdivision_code>\w\w))?)?$')
@classmethod
def _validate_geo(cls, code):
reasons = []
match = cls.geo_re.match(code)
if not match:
reasons.append(f'invalid geo "{code}"')
return reasons
def __init__(self, geo, values):
self.code = geo
match = self.geo_re.match(geo)
self.continent_code = match.group('continent_code')
self.country_code = match.group('country_code')
self.subdivision_code = match.group('subdivision_code')
self.values = sorted(values)
@property
def parents(self):
bits = self.code.split('-')[:-1]
while bits:
yield '-'.join(bits)
bits.pop()
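# Editor's note (illustrative, not in the original source): for the code
# 'NA-US-CA' this property yields 'NA-US' and then 'NA'.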
def _equality_tuple(self):
return (self.continent_code, self.country_code, self.subdivision_code,
self.values)
def __repr__(self):
return f"'Geo {self.continent_code} {self.country_code} " \
"{self.subdivision_code} {self.values}'"
class _ValuesMixin(object):
@classmethod
def validate(cls, name, fqdn, data):
reasons = super(_ValuesMixin, cls).validate(name, fqdn, data)
values = data.get('values', data.get('value', []))
reasons.extend(cls._value_type.validate(values, cls._type))
return reasons
def __init__(self, zone, name, data, source=None):
super(_ValuesMixin, self).__init__(zone, name, data, source=source)
try:
values = data['values']
except KeyError:
values = [data['value']]
self.values = sorted(self._value_type.process(values))
def changes(self, other, target):
if self.values != other.values:
return Update(self, other)
return super(_ValuesMixin, self).changes(other, target)
def _data(self):
ret = super(_ValuesMixin, self)._data()
if len(self.values) > 1:
values = [getattr(v, 'data', v) for v in self.values if v]
if len(values) > 1:
ret['values'] = values
elif len(values) == 1:
ret['value'] = values[0]
elif len(self.values) == 1:
v = self.values[0]
if v:
ret['value'] = getattr(v, 'data', v)
return ret
def __repr__(self):
values = "', '".join([str(v) for v in self.values])
klass = self.__class__.__name__
return f"<{klass} {self._type} {self.ttl}, {self.fqdn}, ['{values}']>"
class _GeoMixin(_ValuesMixin):
'''
Adds GeoDNS support to a record.
Must be included before `Record`.
'''
@classmethod
def validate(cls, name, fqdn, data):
reasons = super(_GeoMixin, cls).validate(name, fqdn, data)
try:
geo = dict(data['geo'])
for code, values in geo.items():
reasons.extend(GeoValue._validate_geo(code))
reasons.extend(cls._value_type.validate(values, cls._type))
except KeyError:
pass
return reasons
def __init__(self, zone, name, data, *args, **kwargs):
super(_GeoMixin, self).__init__(zone, name, data, *args, **kwargs)
try:
self.geo = dict(data['geo'])
except KeyError:
self.geo = {}
for code, values in self.geo.items():
self.geo[code] = GeoValue(code, values)
def _data(self):
ret = super(_GeoMixin, self)._data()
if self.geo:
geo = {}
for code, value in self.geo.items():
geo[code] = value.values
ret['geo'] = geo
return ret
def changes(self, other, target):
if target.SUPPORTS_GEO:
if self.geo != other.geo:
return Update(self, other)
return super(_GeoMixin, self).changes(other, target)
def __repr__(self):
if self.geo:
klass = self.__class__.__name__
return f'<{klass} {self._type} {self.ttl}, {self.fqdn}, ' \
f'{self.values}, {self.geo}>'
return super(_GeoMixin, self).__repr__()
class _ValueMixin(object):
@classmethod
def validate(cls, name, fqdn, data):
reasons = super(_ValueMixin, cls).validate(name, fqdn, data)
reasons.extend(cls._value_type.validate(data.get('value', None),
cls._type))
return reasons
def __init__(self, zone, name, data, source=None):
super(_ValueMixin, self).__init__(zone, name, data, source=source)
self.value = self._value_type.process(data['value'])
def changes(self, other, target):
if self.value != other.value:
return Update(self, other)
return super(_ValueMixin, self).changes(other, target)
def _data(self):
ret = super(_ValueMixin, self)._data()
if self.value:
ret['value'] = getattr(self.value, 'data', self.value)
return ret
def __repr__(self):
klass = self.__class__.__name__
return f'<{klass} {self._type} {self.ttl}, {self.fqdn}, {self.value}>'
class _DynamicPool(object):
log = getLogger('_DynamicPool')
def __init__(self, _id, data):
self._id = _id
values = [
{
'value': d['value'],
'weight': d.get('weight', 1),
'status': d.get('status', 'obey'),
} for d in data['values']
]
values.sort(key=lambda d: d['value'])
# normalize weight of a single-value pool
if len(values) == 1:
weight = data['values'][0].get('weight', 1)
if weight != 1:
self.log.warning(
'Using weight=1 instead of %s for single-value pool %s',
weight, _id)
values[0]['weight'] = 1
fallback = data.get('fallback', None)
self.data = {
'fallback': fallback if fallback != 'default' else None,
'values': values,
}
def _data(self):
return self.data
def __eq__(self, other):
if not isinstance(other, _DynamicPool):
return False
return self.data == other.data
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return f'{self.data}'
class _DynamicRule(object):
def __init__(self, i, data):
self.i = i
self.data = {}
try:
self.data['pool'] = data['pool']
except KeyError:
pass
try:
self.data['geos'] = sorted(data['geos'])
except KeyError:
pass
def _data(self):
return self.data
def __eq__(self, other):
if not isinstance(other, _DynamicRule):
return False
return self.data == other.data
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return f'{self.data}'
class _Dynamic(object):
def __init__(self, pools, rules):
self.pools = pools
self.rules = rules
def _data(self):
pools = {}
for _id, pool in self.pools.items():
pools[_id] = pool._data()
rules = []
for rule in self.rules:
rules.append(rule._data())
return {
'pools': pools,
'rules': rules,
}
def __eq__(self, other):
if not isinstance(other, _Dynamic):
return False
ret = self.pools == other.pools and self.rules == other.rules
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return f'{self.pools}, {self.rules}'
class _DynamicMixin(object):
geo_re = re.compile(r'^(?P<continent_code>\w\w)(-(?P<country_code>\w\w)'
r'(-(?P<subdivision_code>\w\w))?)?$')
@classmethod
def validate(cls, name, fqdn, data):
reasons = super(_DynamicMixin, cls).validate(name, fqdn, data)
if 'dynamic' not in data:
return reasons
elif 'geo' in data:
reasons.append('"dynamic" record with "geo" content')
try:
pools = data['dynamic']['pools']
except KeyError:
pools = {}
pools_exist = set()
pools_seen = set()
pools_seen_as_fallback = set()
if not isinstance(pools, dict):
reasons.append('pools must be a dict')
elif not pools:
reasons.append('missing pools')
else:
for _id, pool in sorted(pools.items()):
if not isinstance(pool, dict):
reasons.append(f'pool "{_id}" must be a dict')
continue
try:
values = pool['values']
except KeyError:
reasons.append(f'pool "{_id}" is missing values')
continue
pools_exist.add(_id)
for i, value in enumerate(values):
value_num = i + 1
try:
weight = value['weight']
weight = int(weight)
if weight < 1 or weight > 100:
reasons.append(f'invalid weight "{weight}" in '
f'pool "{_id}" value {value_num}')
except KeyError:
pass
except ValueError:
reasons.append(f'invalid weight "{weight}" in '
f'pool "{_id}" value {value_num}')
try:
status = value['status']
if status not in ['up', 'down', 'obey']:
reasons.append(f'invalid status "{status}" in '
f'pool "{_id}" value {value_num}')
except KeyError:
pass
try:
value = value['value']
reasons.extend(cls._value_type.validate(value,
cls._type))
except KeyError:
reasons.append(f'missing value in pool "{_id}" '
f'value {value_num}')
if len(values) == 1 and values[0].get('weight', 1) != 1:
reasons.append(f'pool "{_id}" has single value with '
'weight!=1')
fallback = pool.get('fallback', None)
if fallback is not None:
if fallback in pools:
pools_seen_as_fallback.add(fallback)
else:
reasons.append(f'undefined fallback "{fallback}" '
f'for pool "{_id}"')
# Check for loops
fallback = pools[_id].get('fallback', None)
seen = [_id, fallback]
while fallback is not None:
# See if there's a next fallback
fallback = pools.get(fallback, {}).get('fallback', None)
if fallback in seen:
loop = ' -> '.join(seen)
reasons.append(f'loop in pool fallbacks: {loop}')
# exit the loop
break
seen.append(fallback)
try:
rules = data['dynamic']['rules']
except KeyError:
rules = []
if not isinstance(rules, (list, tuple)):
reasons.append('rules must be a list')
elif not rules:
reasons.append('missing rules')
else:
seen_default = False
for i, rule in enumerate(rules):
rule_num = i + 1
try:
pool = rule['pool']
except KeyError:
reasons.append(f'rule {rule_num} missing pool')
continue
try:
geos = rule['geos']
except KeyError:
geos = []
if not isinstance(pool, str):
reasons.append(f'rule {rule_num} invalid pool "{pool}"')
else:
if pool not in pools:
reasons.append(f'rule {rule_num} undefined pool '
f'"{pool}"')
elif pool in pools_seen and geos:
reasons.append(f'rule {rule_num} invalid, target '
f'pool "{pool}" reused')
pools_seen.add(pool)
if not geos:
if seen_default:
reasons.append(f'rule {rule_num} duplicate default')
seen_default = True
if not isinstance(geos, (list, tuple)):
reasons.append(f'rule {rule_num} geos must be a list')
else:
for geo in geos:
reasons.extend(GeoCodes.validate(geo,
f'rule {rule_num} '))
unused = pools_exist - pools_seen - pools_seen_as_fallback
if unused:
unused = '", "'.join(sorted(unused))
reasons.append(f'unused pools: "{unused}"')
return reasons
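# Editor's sketch of a minimal 'dynamic' block that passes these checks
# (an assumption based on the validation above, not taken from octoDNS docs):
#   dynamic:
#     pools:
#       one:
#         values:
#           - value: 1.2.3.4
#     rules:
#       - geos: [NA-US]
#         pool: one
#       - pool: one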
def __init__(self, zone, name, data, *args, **kwargs):
super(_DynamicMixin, self).__init__(zone, name, data, *args,
**kwargs)
self.dynamic = {}
if 'dynamic' not in data:
return
# pools
try:
pools = dict(data['dynamic']['pools'])
except:
pools = {}
for _id, pool in sorted(pools.items()):
pools[_id] = _DynamicPool(_id, pool)
# rules
try:
rules = list(data['dynamic']['rules'])
except:
rules = []
parsed = []
for i, rule in enumerate(rules):
parsed.append(_DynamicRule(i, rule))
# dynamic
self.dynamic = _Dynamic(pools, parsed)
def _data(self):
ret = super(_DynamicMixin, self)._data()
if self.dynamic:
ret['dynamic'] = self.dynamic._data()
return ret
def changes(self, other, target):
if target.SUPPORTS_DYNAMIC:
if self.dynamic != other.dynamic:
return Update(self, other)
return super(_DynamicMixin, self).changes(other, target)
def __repr__(self):
# TODO: improve this whole thing, we need multi-line...
if self.dynamic:
# TODO: this hack can't going to cut it, as part of said
# improvements the value types should deal with serializing their
# value
try:
values = self.values
except AttributeError:
values = self.value
klass = self.__class__.__name__
return f'<{klass} {self._type} {self.ttl}, {self.fqdn}, ' \
f'{values}, {self.dynamic}>'
return super(_DynamicMixin, self).__repr__()
class _IpList(object):
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
if len(data) == 0:
return ['missing value(s)']
reasons = []
for value in data:
if value == '':
reasons.append('empty value')
elif value is None:
reasons.append('missing value(s)')
else:
try:
cls._address_type(str(value))
except Exception:
addr_name = cls._address_name
reasons.append(f'invalid {addr_name} address "{value}"')
return reasons
@classmethod
def process(cls, values):
# Translating None into '' so that the list will be sortable in
# python3, get everything to str first
values = [str(v) if v is not None else '' for v in values]
# Now round trip all non-'' through the address type and back to a str
# to normalize the address representation.
return [str(cls._address_type(v)) if v != '' else ''
for v in values]
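# Editor's note (illustrative, not in the original source): for Ipv6List this
# round trip normalizes spellings, e.g.
#   Ipv6List.process(['2001:0db8::0001', None]) -> ['2001:db8::1', '']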
class Ipv4List(_IpList):
_address_name = 'IPv4'
_address_type = IPv4Address
class Ipv6List(_IpList):
_address_name = 'IPv6'
_address_type = IPv6Address
class _TargetValue(object):
@classmethod
def validate(cls, data, _type):
reasons = []
if data == '':
reasons.append('empty value')
elif not data:
reasons.append('missing value')
# NOTE: FQDN complains if the data it receives isn't a str, it doesn't
# allow unicode... This is likely specific to 2.7
elif not FQDN(str(data), allow_underscores=True).is_valid:
reasons.append(f'{_type} value "{data}" is not a valid FQDN')
elif not data.endswith('.'):
reasons.append(f'{_type} value "{data}" missing trailing .')
return reasons
@classmethod
    def process(cls, value):
if value:
return value.lower()
return value
class CnameValue(_TargetValue):
pass
class DnameValue(_TargetValue):
pass
class ARecord(_DynamicMixin, _GeoMixin, Record):
_type = 'A'
_value_type = Ipv4List
class AaaaRecord(_DynamicMixin, _GeoMixin, Record):
_type = 'AAAA'
_value_type = Ipv6List
class AliasValue(_TargetValue):
pass
class AliasRecord(_ValueMixin, Record):
_type = 'ALIAS'
_value_type = AliasValue
@classmethod
def validate(cls, name, fqdn, data):
reasons = []
if name != '':
reasons.append('non-root ALIAS not allowed')
reasons.extend(super(AliasRecord, cls).validate(name, fqdn, data))
return reasons
class CaaValue(EqualityTupleMixin):
# https://tools.ietf.org/html/rfc6844#page-5
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
try:
flags = int(value.get('flags', 0))
if flags < 0 or flags > 255:
reasons.append(f'invalid flags "{flags}"')
except ValueError:
reasons.append(f'invalid flags "{value["flags"]}"')
if 'tag' not in value:
reasons.append('missing tag')
if 'value' not in value:
reasons.append('missing value')
return reasons
@classmethod
def process(cls, values):
return [CaaValue(v) for v in values]
def __init__(self, value):
self.flags = int(value.get('flags', 0))
self.tag = value['tag']
self.value = value['value']
@property
def data(self):
return {
'flags': self.flags,
'tag': self.tag,
'value': self.value,
}
def _equality_tuple(self):
return (self.flags, self.tag, self.value)
def __repr__(self):
return f'{self.flags} {self.tag} "{self.value}"'
class CaaRecord(_ValuesMixin, Record):
_type = 'CAA'
_value_type = CaaValue
class CnameRecord(_DynamicMixin, _ValueMixin, Record):
_type = 'CNAME'
_value_type = CnameValue
@classmethod
def validate(cls, name, fqdn, data):
reasons = []
if name == '':
reasons.append('root CNAME not allowed')
reasons.extend(super(CnameRecord, cls).validate(name, fqdn, data))
return reasons
class DnameRecord(_DynamicMixin, _ValueMixin, Record):
_type = 'DNAME'
_value_type = DnameValue
class LocValue(EqualityTupleMixin):
# TODO: work out how to do defaults per RFC
@classmethod
def validate(cls, data, _type):
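        # LOC (RFC 1876) fields: integer degrees/minutes, float seconds/altitude/size/precision, and N/S, E/W direction letters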
int_keys = [
'lat_degrees',
'lat_minutes',
'long_degrees',
'long_minutes',
]
float_keys = [
'lat_seconds',
'long_seconds',
'altitude',
'size',
'precision_horz',
'precision_vert',
]
direction_keys = [
'lat_direction',
'long_direction',
]
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
for key in int_keys:
try:
int(value[key])
if (
(
key == 'lat_degrees' and
not 0 <= int(value[key]) <= 90
) or (
key == 'long_degrees' and
not 0 <= int(value[key]) <= 180
) or (
key in ['lat_minutes', 'long_minutes'] and
not 0 <= int(value[key]) <= 59
)
):
reasons.append(f'invalid value for {key} '
f'"{value[key]}"')
except KeyError:
reasons.append(f'missing {key}')
except ValueError:
reasons.append(f'invalid {key} "{value[key]}"')
for key in float_keys:
try:
float(value[key])
if (
(
key in ['lat_seconds', 'long_seconds'] and
not 0 <= float(value[key]) <= 59.999
) or (
key == 'altitude' and
not -100000.00 <= float(value[key]) <= 42849672.95
) or (
key in ['size',
'precision_horz',
'precision_vert'] and
not 0 <= float(value[key]) <= 90000000.00
)
):
reasons.append(f'invalid value for {key} '
f'"{value[key]}"')
except KeyError:
reasons.append(f'missing {key}')
except ValueError:
reasons.append(f'invalid {key} "{value[key]}"')
for key in direction_keys:
try:
str(value[key])
if (
key == 'lat_direction' and
value[key] not in ['N', 'S']
):
reasons.append(f'invalid direction for {key} '
f'"{value[key]}"')
if (
key == 'long_direction' and
value[key] not in ['E', 'W']
):
reasons.append(f'invalid direction for {key} '
f'"{value[key]}"')
except KeyError:
reasons.append(f'missing {key}')
return reasons
@classmethod
def process(cls, values):
return [LocValue(v) for v in values]
def __init__(self, value):
self.lat_degrees = int(value['lat_degrees'])
self.lat_minutes = int(value['lat_minutes'])
self.lat_seconds = float(value['lat_seconds'])
self.lat_direction = value['lat_direction'].upper()
self.long_degrees = int(value['long_degrees'])
self.long_minutes = int(value['long_minutes'])
self.long_seconds = float(value['long_seconds'])
self.long_direction = value['long_direction'].upper()
self.altitude = float(value['altitude'])
self.size = float(value['size'])
self.precision_horz = float(value['precision_horz'])
self.precision_vert = float(value['precision_vert'])
@property
def data(self):
return {
'lat_degrees': self.lat_degrees,
'lat_minutes': self.lat_minutes,
'lat_seconds': self.lat_seconds,
'lat_direction': self.lat_direction,
'long_degrees': self.long_degrees,
'long_minutes': self.long_minutes,
'long_seconds': self.long_seconds,
'long_direction': self.long_direction,
'altitude': self.altitude,
'size': self.size,
'precision_horz': self.precision_horz,
'precision_vert': self.precision_vert,
}
def __hash__(self):
return hash((
self.lat_degrees,
self.lat_minutes,
self.lat_seconds,
self.lat_direction,
self.long_degrees,
self.long_minutes,
self.long_seconds,
self.long_direction,
self.altitude,
self.size,
self.precision_horz,
self.precision_vert,
))
def _equality_tuple(self):
return (
self.lat_degrees,
self.lat_minutes,
self.lat_seconds,
self.lat_direction,
self.long_degrees,
self.long_minutes,
self.long_seconds,
self.long_direction,
self.altitude,
self.size,
self.precision_horz,
self.precision_vert,
)
def __repr__(self):
return f"'{self.lat_degrees} {self.lat_minutes} " \
f"{self.lat_seconds:.3f} {self.lat_direction} " \
f"{self.long_degrees} {self.long_minutes} " \
f"{self.long_seconds:.3f} {self.long_direction} " \
f"{self.altitude:.2f}m {self.size:.2f}m " \
f"{self.precision_horz:.2f}m {self.precision_vert:.2f}m'"
class LocRecord(_ValuesMixin, Record):
_type = 'LOC'
_value_type = LocValue
class MxValue(EqualityTupleMixin):
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
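            # accept either 'preference' (the RFC 1035 term) or the legacy 'priority' key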
try:
try:
int(value['preference'])
except KeyError:
int(value['priority'])
except KeyError:
reasons.append('missing preference')
except ValueError:
reasons.append(f'invalid preference "{value["preference"]}"')
exchange = None
try:
exchange = value.get('exchange', None) or value['value']
if not FQDN(str(exchange), allow_underscores=True).is_valid:
reasons.append(f'Invalid MX exchange "{exchange}" is not '
'a valid FQDN.')
elif not exchange.endswith('.'):
reasons.append(f'MX value "{exchange}" missing trailing .')
except KeyError:
reasons.append('missing exchange')
return reasons
@classmethod
def process(cls, values):
return [MxValue(v) for v in values]
def __init__(self, value):
# RFC1035 says preference, half the providers use priority
try:
preference = value['preference']
except KeyError:
preference = value['priority']
self.preference = int(preference)
# UNTIL 1.0 remove value fallback
try:
exchange = value['exchange']
except KeyError:
exchange = value['value']
self.exchange = exchange.lower()
@property
def data(self):
return {
'preference': self.preference,
'exchange': self.exchange,
}
def __hash__(self):
return hash((self.preference, self.exchange))
def _equality_tuple(self):
return (self.preference, self.exchange)
def __repr__(self):
return f"'{self.preference} {self.exchange}'"
class MxRecord(_ValuesMixin, Record):
_type = 'MX'
_value_type = MxValue
class NaptrValue(EqualityTupleMixin):
VALID_FLAGS = ('S', 'A', 'U', 'P')
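    # NAPTR flags per RFC 3403: S=SRV lookup, A=address lookup, U=URI output, P=protocol-specific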
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
try:
int(value['order'])
except KeyError:
reasons.append('missing order')
except ValueError:
reasons.append(f'invalid order "{value["order"]}"')
try:
int(value['preference'])
except KeyError:
reasons.append('missing preference')
except ValueError:
reasons.append(f'invalid preference "{value["preference"]}"')
try:
flags = value['flags']
if flags not in cls.VALID_FLAGS:
reasons.append(f'unrecognized flags "{flags}"')
except KeyError:
reasons.append('missing flags')
# TODO: validate these... they're non-trivial
for k in ('service', 'regexp', 'replacement'):
if k not in value:
reasons.append(f'missing {k}')
return reasons
@classmethod
def process(cls, values):
return [NaptrValue(v) for v in values]
def __init__(self, value):
self.order = int(value['order'])
self.preference = int(value['preference'])
self.flags = value['flags']
self.service = value['service']
self.regexp = value['regexp']
self.replacement = value['replacement']
@property
def data(self):
return {
'order': self.order,
'preference': self.preference,
'flags': self.flags,
'service': self.service,
'regexp': self.regexp,
'replacement': self.replacement,
}
def __hash__(self):
return hash(self.__repr__())
def _equality_tuple(self):
return (self.order, self.preference, self.flags, self.service,
self.regexp, self.replacement)
def __repr__(self):
flags = self.flags if self.flags is not None else ''
service = self.service if self.service is not None else ''
regexp = self.regexp if self.regexp is not None else ''
return f"'{self.order} {self.preference} \"{flags}\" \"{service}\" " \
f"\"{regexp}\" {self.replacement}'"
class NaptrRecord(_ValuesMixin, Record):
_type = 'NAPTR'
_value_type = NaptrValue
class _NsValue(object):
@classmethod
def validate(cls, data, _type):
if not data:
return ['missing value(s)']
elif not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
if not FQDN(str(value), allow_underscores=True).is_valid:
reasons.append(f'Invalid NS value "{value}" is not '
'a valid FQDN.')
elif not value.endswith('.'):
reasons.append(f'NS value "{value}" missing trailing .')
return reasons
@classmethod
def process(cls, values):
return values
class NsRecord(_ValuesMixin, Record):
_type = 'NS'
_value_type = _NsValue
class PtrValue(_TargetValue):
@classmethod
def validate(cls, values, _type):
if not isinstance(values, list):
values = [values]
reasons = []
if not values:
reasons.append('missing values')
for value in values:
reasons.extend(super(PtrValue, cls).validate(value, _type))
return reasons
@classmethod
def process(cls, values):
return [super(PtrValue, cls).process(v) for v in values]
class PtrRecord(_ValuesMixin, Record):
_type = 'PTR'
_value_type = PtrValue
# This is for backward compatibility with providers that don't support
# multi-value PTR records.
@property
def value(self):
return self.values[0]
class SshfpValue(EqualityTupleMixin):
VALID_ALGORITHMS = (1, 2, 3, 4)
VALID_FINGERPRINT_TYPES = (1, 2)
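    # algorithms: 1=RSA, 2=DSA, 3=ECDSA, 4=Ed25519; fingerprint types: 1=SHA-1, 2=SHA-256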
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
try:
algorithm = int(value['algorithm'])
if algorithm not in cls.VALID_ALGORITHMS:
reasons.append(f'unrecognized algorithm "{algorithm}"')
except KeyError:
reasons.append('missing algorithm')
except ValueError:
reasons.append(f'invalid algorithm "{value["algorithm"]}"')
try:
fingerprint_type = int(value['fingerprint_type'])
if fingerprint_type not in cls.VALID_FINGERPRINT_TYPES:
reasons.append('unrecognized fingerprint_type '
f'"{fingerprint_type}"')
except KeyError:
reasons.append('missing fingerprint_type')
except ValueError:
reasons.append('invalid fingerprint_type '
f'"{value["fingerprint_type"]}"')
if 'fingerprint' not in value:
reasons.append('missing fingerprint')
return reasons
@classmethod
def process(cls, values):
return [SshfpValue(v) for v in values]
def __init__(self, value):
self.algorithm = int(value['algorithm'])
self.fingerprint_type = int(value['fingerprint_type'])
self.fingerprint = value['fingerprint']
@property
def data(self):
return {
'algorithm': self.algorithm,
'fingerprint_type': self.fingerprint_type,
'fingerprint': self.fingerprint,
}
def __hash__(self):
return hash(self.__repr__())
def _equality_tuple(self):
return (self.algorithm, self.fingerprint_type, self.fingerprint)
def __repr__(self):
return f"'{self.algorithm} {self.fingerprint_type} {self.fingerprint}'"
class SshfpRecord(_ValuesMixin, Record):
_type = 'SSHFP'
_value_type = SshfpValue
class _ChunkedValuesMixin(_ValuesMixin):
CHUNK_SIZE = 255
_unescaped_semicolon_re = re.compile(r'\w;')
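    # DNS character-strings are limited to 255 octets, so long values are split into quoted 255-byte chunks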
def chunked_value(self, value):
value = value.replace('"', '\\"')
vs = [value[i:i + self.CHUNK_SIZE]
for i in range(0, len(value), self.CHUNK_SIZE)]
vs = '" "'.join(vs)
return f'"{vs}"'
@property
def chunked_values(self):
values = []
for v in self.values:
values.append(self.chunked_value(v))
return values
class _ChunkedValue(object):
_unescaped_semicolon_re = re.compile(r'\w;')
@classmethod
def validate(cls, data, _type):
if not data:
return ['missing value(s)']
elif not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
if cls._unescaped_semicolon_re.search(value):
reasons.append(f'unescaped ; in "{value}"')
return reasons
@classmethod
def process(cls, values):
ret = []
for v in values:
if v and v[0] == '"':
v = v[1:-1]
ret.append(v.replace('" "', ''))
return ret
class SpfRecord(_ChunkedValuesMixin, Record):
_type = 'SPF'
_value_type = _ChunkedValue
class SrvValue(EqualityTupleMixin):
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
            # TODO: validate priority, weight and port ranges
try:
int(value['priority'])
except KeyError:
reasons.append('missing priority')
except ValueError:
reasons.append(f'invalid priority "{value["priority"]}"')
try:
int(value['weight'])
except KeyError:
reasons.append('missing weight')
except ValueError:
reasons.append(f'invalid weight "{value["weight"]}"')
try:
int(value['port'])
except KeyError:
reasons.append('missing port')
except ValueError:
reasons.append(f'invalid port "{value["port"]}"')
try:
target = value['target']
if not target.endswith('.'):
reasons.append(f'SRV value "{target}" missing trailing .')
if target != '.' and \
not FQDN(str(target), allow_underscores=True).is_valid:
reasons.append(f'Invalid SRV target "{target}" is not '
'a valid FQDN.')
except KeyError:
reasons.append('missing target')
return reasons
@classmethod
def process(cls, values):
return [SrvValue(v) for v in values]
def __init__(self, value):
self.priority = int(value['priority'])
self.weight = int(value['weight'])
self.port = int(value['port'])
self.target = value['target'].lower()
@property
def data(self):
return {
'priority': self.priority,
'weight': self.weight,
'port': self.port,
'target': self.target,
}
def __hash__(self):
return hash(self.__repr__())
def _equality_tuple(self):
return (self.priority, self.weight, self.port, self.target)
def __repr__(self):
return f"'{self.priority} {self.weight} {self.port} {self.target}'"
class SrvRecord(_ValuesMixin, Record):
_type = 'SRV'
_value_type = SrvValue
_name_re = re.compile(r'^(\*|_[^\.]+)\.[^\.]+')
@classmethod
def validate(cls, name, fqdn, data):
reasons = []
if not cls._name_re.match(name):
reasons.append('invalid name for SRV record')
reasons.extend(super(SrvRecord, cls).validate(name, fqdn, data))
return reasons
class _TxtValue(_ChunkedValue):
pass
class TxtRecord(_ChunkedValuesMixin, Record):
_type = 'TXT'
_value_type = _TxtValue
class UrlfwdValue(EqualityTupleMixin):
VALID_CODES = (301, 302)
VALID_MASKS = (0, 1, 2)
VALID_QUERY = (0, 1)
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
try:
code = int(value['code'])
if code not in cls.VALID_CODES:
reasons.append(f'unrecognized return code "{code}"')
except KeyError:
reasons.append('missing code')
except ValueError:
reasons.append(f'invalid return code "{value["code"]}"')
try:
masking = int(value['masking'])
if masking not in cls.VALID_MASKS:
reasons.append(f'unrecognized masking setting "{masking}"')
except KeyError:
reasons.append('missing masking')
except ValueError:
reasons.append(f'invalid masking setting "{value["masking"]}"')
try:
query = int(value['query'])
if query not in cls.VALID_QUERY:
reasons.append(f'unrecognized query setting "{query}"')
except KeyError:
reasons.append('missing query')
except ValueError:
reasons.append(f'invalid query setting "{value["query"]}"')
for k in ('path', 'target'):
if k not in value:
reasons.append(f'missing {k}')
return reasons
@classmethod
def process(cls, values):
return [UrlfwdValue(v) for v in values]
def __init__(self, value):
self.path = value['path']
self.target = value['target']
self.code = int(value['code'])
self.masking = int(value['masking'])
self.query = int(value['query'])
@property
def data(self):
return {
'path': self.path,
'target': self.target,
'code': self.code,
'masking': self.masking,
'query': self.query,
}
def __hash__(self):
return hash(self.__repr__())
def _equality_tuple(self):
return (self.path, self.target, self.code, self.masking, self.query)
def __repr__(self):
return f'"{self.path}" "{self.target}" {self.code} ' \
f'{self.masking} {self.query}'
class UrlfwdRecord(_ValuesMixin, Record):
_type = 'URLFWD'
_value_type = UrlfwdValue
| 31.108974
| 79
| 0.524232
|
36bb21f9290fad17e6803256ee17637cf5b8dfaa
| 9,610
|
py
|
Python
|
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/j6_2ch_128mm_zoom.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/j6_2ch_128mm_zoom.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/j6_2ch_128mm_zoom.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 1
num_epochs_train = 470
# - learning rate and method
base_lr = .0001
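# epoch -> learning rate; drop the rate by 10x for the final tenth of training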
learning_rate_schedule = {
0: base_lr,
num_epochs_train*9/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotate": (-180, 180),
"shear": (0, 0),
"zoom_x": (-0.5, 1.5),
"zoom_y": (-0.5, 1.5),
"skew_x": (-10, 10),
"skew_y": (-10, 10),
"translate": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0)
}
def filter_samples(folders):
# don't use patients who don't have 2ch
import glob
def has_2ch(f):
return len(glob.glob(f+"/2ch_*.pkl")) > 0
return [folder for folder in folders if has_2ch(folder)]
use_hough_roi = True # use roi to center patches
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(128,128)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 64
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:2ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:4ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 1000 # More augmentations since we only use single slices
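# average TTA predictions by converting CDFs to PDFs, taking a normalised geometric mean and cumsumming back to a CDF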
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = list(data_sizes["sliced:data:singleslice:2ch"])
input_size[0] = None
l0 = nn.layers.InputLayer( tuple(input_size) )
l1a = nn.layers.dnn.Conv2DDNNLayer(l0, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    ldsys3drop = nn.layers.dropout(ldsys3, p=0.5) # dropout at the output might encourage adjacent neurons to correlate
ldsys3dropnorm = layers.NormalisationLayer(ldsys3drop)
l_systole = layers.CumSumLayer(ldsys3dropnorm)
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    lddia3drop = nn.layers.dropout(lddia3, p=0.5) # dropout at the output might encourage adjacent neurons to correlate
lddia3dropnorm = layers.NormalisationLayer(lddia3drop)
l_diastole = layers.CumSumLayer(lddia3dropnorm)
return {
"inputs":{
"sliced:data:singleslice:2ch": l0
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
ldsys3: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
lddia3: l2_weight_out,
},
"meta_outputs": {
"systole": ldsys2,
"diastole": lddia2,
}
}
| 45.330189
| 176
| 0.704891
|
4c9e268276a48861549b3e7f0e4a2819d236d2cc
| 2,544
|
py
|
Python
|
contrib/demo/sharding/add-cluster.py
|
florkbr/kcp
|
33ba15f95927daeaf0239f6176e08becff0cae3d
|
[
"Apache-2.0"
] | 1,189
|
2021-05-05T06:30:17.000Z
|
2022-03-30T13:14:08.000Z
|
contrib/demo/sharding/add-cluster.py
|
florkbr/kcp
|
33ba15f95927daeaf0239f6176e08becff0cae3d
|
[
"Apache-2.0"
] | 509
|
2021-05-05T00:26:21.000Z
|
2022-03-31T16:56:19.000Z
|
contrib/demo/sharding/add-cluster.py
|
florkbr/kcp
|
33ba15f95927daeaf0239f6176e08becff0cae3d
|
[
"Apache-2.0"
] | 154
|
2021-05-05T09:07:30.000Z
|
2022-03-24T14:01:48.000Z
|
#!/usr/bin/env python
# Copyright 2021 The KCP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import yaml
if len(sys.argv) != 3:
print("Usage: {} <donor> <recipient>".format(sys.argv[0]))
sys.exit(1)
print("Extracting cluster from {}".format(sys.argv[1]))
with open(sys.argv[1]) as raw_donor:
donor = yaml.load(raw_donor, yaml.FullLoader)
foundDonor = False
for cluster in donor["clusters"]:
if cluster["name"] == "user":
foundDonor = True
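        # the logical cluster name is the path segment after "clusters/" in the server URL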
clusterName = cluster["cluster"]["server"]
clusterName = clusterName[clusterName.index("clusters/") + 9:]
if not foundDonor:
print("Did not find cluster 'user' in donor")
sys.exit(1)
print("Donating cluster to {}".format(sys.argv[2]))
with open(sys.argv[2], "r") as raw_recipient:
recipient = yaml.load(raw_recipient, yaml.FullLoader)
foundCluster = False
for cluster in recipient["clusters"]:
if cluster["name"] == "user":
foundCluster = True
copiedCluster = yaml.load(yaml.dump(cluster), yaml.FullLoader)
copiedServer = copiedCluster["cluster"]["server"]
copiedServer = copiedServer[:copiedServer.index("clusters/") + 9] + clusterName
copiedCluster["cluster"]["server"] = copiedServer
copiedCluster["name"] = "other"
recipient["clusters"].append(copiedCluster)
if not foundCluster:
print("Did not find cluster 'user' in recipient")
sys.exit(1)
foundContext = False
for context in recipient["contexts"]:
if context["name"] == "user":
foundContext = True
copiedContext = yaml.load(yaml.dump(context), yaml.FullLoader)
copiedContext["name"] = "other"
copiedContext["context"]["cluster"] = "other"
recipient["contexts"].append(copiedContext)
if not foundContext:
print("Did not find context 'user' in recipient")
sys.exit(1)
with open(sys.argv[2], "w") as raw_recipient:
yaml.dump(recipient, raw_recipient)
| 36.869565
| 91
| 0.66195
|
6c6a8189869f36e3bfce640cd4f57b77c660e204
| 2,774
|
py
|
Python
|
toolchain/riscv/Darwin/riscv64-unknown-elf/lib/rv32eac_zba_zbb/ilp32e/libstdc++.a-gdb.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | null | null | null |
toolchain/riscv/Darwin/riscv64-unknown-elf/lib/rv32eac_zba_zbb/ilp32e/libstdc++.a-gdb.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | null | null | null |
toolchain/riscv/Darwin/riscv64-unknown-elf/lib/rv32eac_zba_zbb/ilp32e/libstdc++.a-gdb.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | null | null | null |
# -*- python -*-
# Copyright (C) 2009-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp01--build-binary-packages--parameterized/obj/x86_64-apple-darwin/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-apple-darwin/share/gcc-10.2.0/python'
libdir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp01--build-binary-packages--parameterized/obj/x86_64-apple-darwin/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-apple-darwin/riscv64-unknown-elf/lib/rv32eac_zba_zbb/ilp32e'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute the body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
| 44.741935
| 239
| 0.736482
|
f77a36d06e9e98739837765cc1a916d3ce670ff9
| 658
|
py
|
Python
|
services/document-manager/backend/app/constants.py
|
parc-jason/mds
|
8f181a429442208a061ed72065b71e6c2bd0f76f
|
[
"Apache-2.0"
] | 14
|
2019-09-23T18:02:27.000Z
|
2022-01-12T06:46:52.000Z
|
services/document-manager/backend/app/constants.py
|
parc-jason/mds
|
8f181a429442208a061ed72065b71e6c2bd0f76f
|
[
"Apache-2.0"
] | 715
|
2019-10-01T21:04:57.000Z
|
2022-03-31T21:31:20.000Z
|
services/document-manager/backend/app/constants.py
|
parc-jason/mds
|
8f181a429442208a061ed72065b71e6c2bd0f76f
|
[
"Apache-2.0"
] | 9
|
2019-10-03T21:17:11.000Z
|
2021-09-02T23:26:39.000Z
|
# Cache Timeouts
TIMEOUT_5_MINUTES = 300
TIMEOUT_60_MINUTES = 3600
TIMEOUT_24_HOURS = 86340
TIMEOUT_12_HOURS = 43140
# Cache keys
def FILE_UPLOAD_SIZE(document_guid): return f'document-manager:{document_guid}:file-size'
def FILE_UPLOAD_OFFSET(document_guid): return f'document-manager:{document_guid}:offset'
def FILE_UPLOAD_PATH(document_guid): return f'document-manager:{document_guid}:file-path'
def DOWNLOAD_TOKEN(token_guid): return f'document-manager:download-token:{token_guid}'
# Document Upload constants
TUS_API_VERSION = '1.0.0'
TUS_API_SUPPORTED_VERSIONS = '1.0.0'
FORBIDDEN_FILETYPES = ('js', 'php', 'pl', 'py', 'rb', 'sh', 'so', 'exe', 'dll')
| 41.125
| 89
| 0.779635
|
32a7dee2f97d5b7c0fd250ec086a7c10ab65b5d7
| 1,844
|
py
|
Python
|
lib/cherrypy/cherrypy/test/checkerdemo.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/cherrypy/cherrypy/test/checkerdemo.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/cherrypy/cherrypy/test/checkerdemo.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 162
|
2015-01-01T00:21:16.000Z
|
2022-02-23T02:36:04.000Z
|
"""Demonstration app for cherrypy.checker.
This application is intentionally broken and badly designed.
To demonstrate the output of the CherryPy Checker, simply execute
this module.
"""
import os
import cherrypy
thisdir = os.path.dirname(os.path.abspath(__file__))
class Root:
pass
if __name__ == '__main__':
conf = {'/base': {'tools.staticdir.root': thisdir,
# Obsolete key.
'throw_errors': True,
},
# This entry should be OK.
'/base/static': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'static'},
# Warn on missing folder.
'/base/js': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'},
# Warn on dir with an abs path even though we provide root.
'/base/static2': {'tools.staticdir.on': True,
'tools.staticdir.dir': '/static'},
# Warn on dir with a relative path with no root.
'/static3': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'static'},
# Warn on unknown namespace
'/unknown': {'toobles.gzip.on': True},
# Warn special on cherrypy.<known ns>.*
'/cpknown': {'cherrypy.tools.encode.on': True},
# Warn on mismatched types
'/conftype': {'request.show_tracebacks': 14},
# Warn on unknown tool.
'/web': {'tools.unknown.on': True},
# Warn on server.* in app config.
'/app1': {'server.socket_host': '0.0.0.0'},
# Warn on 'localhost'
'global': {'server.socket_host': 'localhost'},
# Warn on '[name]'
'[/extra_brackets]': {},
}
cherrypy.quickstart(Root(), config=conf)
| 38.416667
| 71
| 0.5282
|
275d94efa66fd0d19f63059273aaee8563d1b58f
| 7,966
|
py
|
Python
|
homeassistant/components/elkm1/alarm_control_panel.py
|
alemuro/home-assistant
|
9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90
|
[
"Apache-2.0"
] | 2
|
2019-10-19T15:07:32.000Z
|
2022-01-29T10:33:20.000Z
|
homeassistant/components/elkm1/alarm_control_panel.py
|
alemuro/home-assistant
|
9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90
|
[
"Apache-2.0"
] | 4
|
2021-02-08T21:05:14.000Z
|
2021-09-08T02:57:03.000Z
|
homeassistant/components/elkm1/alarm_control_panel.py
|
alemuro/home-assistant
|
9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90
|
[
"Apache-2.0"
] | 2
|
2019-01-21T05:49:23.000Z
|
2019-02-19T16:30:48.000Z
|
"""Each ElkM1 area will be created as a separate alarm_control_panel."""
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from . import DOMAIN as ELK_DOMAIN, ElkEntity, create_elk_entities
SIGNAL_ARM_ENTITY = "elkm1_arm"
SIGNAL_DISPLAY_MESSAGE = "elkm1_display_message"
ELK_ALARM_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
vol.Required(ATTR_CODE): vol.All(vol.Coerce(int), vol.Range(0, 999999)),
}
)
DISPLAY_MESSAGE_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
vol.Optional("clear", default=2): vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
vol.Optional("beep", default=False): cv.boolean,
vol.Optional("timeout", default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=65535)
),
vol.Optional("line1", default=""): cv.string,
vol.Optional("line2", default=""): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ElkM1 alarm platform."""
if discovery_info is None:
return
elk_datas = hass.data[ELK_DOMAIN]
entities = []
for elk_data in elk_datas.values():
elk = elk_data["elk"]
entities = create_elk_entities(elk_data, elk.areas, "area", ElkArea, entities)
async_add_entities(entities, True)
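    # helper to fan a signal out to each targeted entity's per-entity dispatcher channel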
def _dispatch(signal, entity_ids, *args):
for entity_id in entity_ids:
async_dispatcher_send(hass, "{}_{}".format(signal, entity_id), *args)
def _arm_service(service):
entity_ids = service.data.get(ATTR_ENTITY_ID, [])
arm_level = _arm_services().get(service.service)
args = (arm_level, service.data.get(ATTR_CODE))
_dispatch(SIGNAL_ARM_ENTITY, entity_ids, *args)
for service in _arm_services():
hass.services.async_register(
alarm.DOMAIN, service, _arm_service, ELK_ALARM_SERVICE_SCHEMA
)
def _display_message_service(service):
entity_ids = service.data.get(ATTR_ENTITY_ID, [])
data = service.data
args = (
data["clear"],
data["beep"],
data["timeout"],
data["line1"],
data["line2"],
)
_dispatch(SIGNAL_DISPLAY_MESSAGE, entity_ids, *args)
hass.services.async_register(
alarm.DOMAIN,
"elkm1_alarm_display_message",
_display_message_service,
DISPLAY_MESSAGE_SERVICE_SCHEMA,
)
def _arm_services():
from elkm1_lib.const import ArmLevel
return {
"elkm1_alarm_arm_vacation": ArmLevel.ARMED_VACATION.value,
"elkm1_alarm_arm_home_instant": ArmLevel.ARMED_STAY_INSTANT.value,
"elkm1_alarm_arm_night_instant": ArmLevel.ARMED_NIGHT_INSTANT.value,
}
class ElkArea(ElkEntity, alarm.AlarmControlPanel):
"""Representation of an Area / Partition within the ElkM1 alarm panel."""
def __init__(self, element, elk, elk_data):
"""Initialize Area as Alarm Control Panel."""
super().__init__(element, elk, elk_data)
self._changed_by_entity_id = ""
self._state = None
async def async_added_to_hass(self):
"""Register callback for ElkM1 changes."""
await super().async_added_to_hass()
for keypad in self._elk.keypads:
keypad.add_callback(self._watch_keypad)
async_dispatcher_connect(
self.hass,
"{}_{}".format(SIGNAL_ARM_ENTITY, self.entity_id),
self._arm_service,
)
async_dispatcher_connect(
self.hass,
"{}_{}".format(SIGNAL_DISPLAY_MESSAGE, self.entity_id),
self._display_message,
)
def _watch_keypad(self, keypad, changeset):
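        # ignore keypads assigned to other areas; track the last user so changed_by can be reported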
if keypad.area != self._element.index:
return
if changeset.get("last_user") is not None:
self._changed_by_entity_id = self.hass.data[ELK_DOMAIN][self._prefix][
"keypads"
].get(keypad.index, "")
self.async_schedule_update_ha_state(True)
@property
def code_format(self):
"""Return the alarm code format."""
return alarm.FORMAT_NUMBER
@property
def state(self):
"""Return the state of the element."""
return self._state
@property
def device_state_attributes(self):
"""Attributes of the area."""
from elkm1_lib.const import AlarmState, ArmedStatus, ArmUpState
attrs = self.initial_attrs()
elmt = self._element
attrs["is_exit"] = elmt.is_exit
attrs["timer1"] = elmt.timer1
attrs["timer2"] = elmt.timer2
if elmt.armed_status is not None:
attrs["armed_status"] = ArmedStatus(elmt.armed_status).name.lower()
if elmt.arm_up_state is not None:
attrs["arm_up_state"] = ArmUpState(elmt.arm_up_state).name.lower()
if elmt.alarm_state is not None:
attrs["alarm_state"] = AlarmState(elmt.alarm_state).name.lower()
attrs["changed_by_entity_id"] = self._changed_by_entity_id
return attrs
def _element_changed(self, element, changeset):
from elkm1_lib.const import ArmedStatus
elk_state_to_hass_state = {
ArmedStatus.DISARMED.value: STATE_ALARM_DISARMED,
ArmedStatus.ARMED_AWAY.value: STATE_ALARM_ARMED_AWAY,
ArmedStatus.ARMED_STAY.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_STAY_INSTANT.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_TO_NIGHT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_NIGHT_INSTANT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_VACATION.value: STATE_ALARM_ARMED_AWAY,
}
if self._element.alarm_state is None:
self._state = None
elif self._area_is_in_alarm_state():
self._state = STATE_ALARM_TRIGGERED
elif self._entry_exit_timer_is_running():
self._state = (
STATE_ALARM_ARMING if self._element.is_exit else STATE_ALARM_PENDING
)
else:
self._state = elk_state_to_hass_state[self._element.armed_status]
def _entry_exit_timer_is_running(self):
return self._element.timer1 > 0 or self._element.timer2 > 0
def _area_is_in_alarm_state(self):
from elkm1_lib.const import AlarmState
return self._element.alarm_state >= AlarmState.FIRE_ALARM.value
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
self._element.disarm(int(code))
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
from elkm1_lib.const import ArmLevel
self._element.arm(ArmLevel.ARMED_STAY.value, int(code))
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
from elkm1_lib.const import ArmLevel
self._element.arm(ArmLevel.ARMED_AWAY.value, int(code))
async def async_alarm_arm_night(self, code=None):
"""Send arm night command."""
from elkm1_lib.const import ArmLevel
self._element.arm(ArmLevel.ARMED_NIGHT.value, int(code))
async def _arm_service(self, arm_level, code):
self._element.arm(arm_level, code)
async def _display_message(self, clear, beep, timeout, line1, line2):
"""Display a message on all keypads for the area."""
self._element.display_message(clear, beep, timeout, line1, line2)
| 34.938596
| 86
| 0.663319
|
2c45407fb3b1121b90ed8f09adf6b950aae1beca
| 782
|
py
|
Python
|
tests/flows/test_sv.py
|
mfkiwl/siliconcompiler
|
49a16d9a07c526821afe1ce2f2d77394e439ca05
|
[
"Apache-2.0"
] | 1
|
2022-01-19T01:12:43.000Z
|
2022-01-19T01:12:43.000Z
|
tests/flows/test_sv.py
|
mfkiwl/siliconcompiler
|
49a16d9a07c526821afe1ce2f2d77394e439ca05
|
[
"Apache-2.0"
] | null | null | null |
tests/flows/test_sv.py
|
mfkiwl/siliconcompiler
|
49a16d9a07c526821afe1ce2f2d77394e439ca05
|
[
"Apache-2.0"
] | null | null | null |
import siliconcompiler
import os
import pytest
@pytest.mark.eda
@pytest.mark.quick
def test_sv(datadir):
'''Test that we can successfully synthesize a SystemVerilog design using the
asicflow.
'''
design = 'prim_fifo_sync'
chip = siliconcompiler.Chip(design=design)
chip.add('source', os.path.join(datadir, 'sv', 'prim_util_pkg.sv'))
chip.add('source', os.path.join(datadir, 'sv', f'{design}.sv'))
chip.add('idir', os.path.join(datadir, 'sv', 'inc/'))
chip.add('define', 'SYNTHESIS')
chip.set('frontend', 'systemverilog')
chip.target('asicflow_freepdk45')
chip.add('steplist', 'import')
chip.add('steplist', 'convert')
chip.add('steplist', 'syn')
chip.run()
assert chip.find_result('vg', step='syn') is not None
| 26.066667
| 80
| 0.662404
|
845665f1f68aa18a1d8352723f99a6b4ca8fa81b
| 22,727
|
py
|
Python
|
geotrek/trekking/serializers.py
|
pierreloicq/Geotrek-admin
|
00cd29f29843f2cc25e5a3c7372fcccf14956887
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/trekking/serializers.py
|
pierreloicq/Geotrek-admin
|
00cd29f29843f2cc25e5a3c7372fcccf14956887
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/trekking/serializers.py
|
pierreloicq/Geotrek-admin
|
00cd29f29843f2cc25e5a3c7372fcccf14956887
|
[
"BSD-2-Clause"
] | null | null | null |
import copy
import datetime
import json
import gpxpy.gpx
from django.conf import settings
from django.contrib.gis.db.models.functions import Transform
from django.urls import reverse
from django.utils import translation
from django.utils.translation import get_language, ugettext_lazy as _
from django.utils.timezone import utc, make_aware
from django.utils.xmlutils import SimplerXMLGenerator
from rest_framework import serializers as rest_serializers
from rest_framework_gis import fields as rest_gis_fields
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from mapentity.serializers import GPXSerializer, plain_text
from geotrek.common.serializers import (
PictogramSerializerMixin, ThemeSerializer,
TranslatedModelSerializer, PicturesSerializerMixin,
PublishableSerializerMixin, RecordSourceSerializer,
TargetPortalSerializer
)
from geotrek.authent.serializers import StructureSerializer
from geotrek.cirkwi.models import CirkwiTag
from geotrek.zoning.serializers import ZoningSerializerMixin
from geotrek.altimetry.serializers import AltimetrySerializerMixin
from geotrek.trekking import models as trekking_models
class TrekGPXSerializer(GPXSerializer):
def end_object(self, trek):
super(TrekGPXSerializer, self).end_object(trek)
for poi in trek.published_pois.all():
geom_3d = poi.geom_3d.transform(4326, clone=True) # GPX uses WGS84
wpt = gpxpy.gpx.GPXWaypoint(latitude=geom_3d.y,
longitude=geom_3d.x,
elevation=geom_3d.z)
wpt.name = "%s: %s" % (poi.type, poi.name)
wpt.description = poi.description
self.gpx.waypoints.append(wpt)
class DifficultyLevelSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.ReadOnlyField(source='difficulty')
class Meta:
model = trekking_models.DifficultyLevel
fields = ('id', 'pictogram', 'label')
class RouteSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.ReadOnlyField(source='route')
class Meta:
model = trekking_models.Route
fields = ('id', 'pictogram', 'label')
class NetworkSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
name = rest_serializers.ReadOnlyField(source='network')
class Meta:
model = trekking_models.Route
fields = ('id', 'pictogram', 'name')
class PracticeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.ReadOnlyField(source='name')
class Meta:
model = trekking_models.Practice
fields = ('id', 'pictogram', 'label')
class AccessibilitySerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.ReadOnlyField(source='name')
class Meta:
model = trekking_models.Accessibility
fields = ('id', 'pictogram', 'label')
class TypeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.Practice
fields = ('id', 'pictogram', 'name')
class WebLinkCategorySerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.WebLinkCategory
fields = ('id', 'pictogram', 'label')
class WebLinkSerializer(TranslatedModelSerializer):
category = WebLinkCategorySerializer()
class Meta:
model = trekking_models.WebLink
fields = ('id', 'name', 'category', 'url')
class LabelTrekSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.LabelTrek
fields = ('id', 'pictogram', 'name', 'advice', 'filter_rando')
class CloseTrekSerializer(TranslatedModelSerializer):
category_id = rest_serializers.ReadOnlyField(source='prefixed_category_id')
class Meta:
model = trekking_models.Trek
fields = ('id', 'category_id')
class RelatedTrekSerializer(TranslatedModelSerializer):
pk = rest_serializers.ReadOnlyField(source='id')
category_slug = rest_serializers.SerializerMethodField()
class Meta:
model = trekking_models.Trek
fields = ('id', 'pk', 'slug', 'name', 'category_slug')
def get_category_slug(self, obj):
if settings.SPLIT_TREKS_CATEGORIES_BY_ITINERANCY and obj.children.exists():
# Translators: This is a slug (without space, accent or special char)
return _('itinerancy')
if settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE and obj.practice:
return obj.practice.slug
else:
# Translators: This is a slug (without space, accent or special char)
return _('trek')
class TrekRelationshipSerializer(rest_serializers.ModelSerializer):
published = rest_serializers.ReadOnlyField(source='trek_b.published')
trek = RelatedTrekSerializer(source='trek_b')
class Meta:
model = trekking_models.TrekRelationship
fields = ('has_common_departure', 'has_common_edge', 'is_circuit_step',
'trek', 'published')
class ChildSerializer(TranslatedModelSerializer):
class Meta:
model = trekking_models.Trek
fields = ('id', )
class TrekSerializer(PublishableSerializerMixin, PicturesSerializerMixin,
AltimetrySerializerMixin, ZoningSerializerMixin,
TranslatedModelSerializer):
difficulty = DifficultyLevelSerializer()
route = RouteSerializer()
networks = NetworkSerializer(many=True)
themes = ThemeSerializer(many=True)
practice = PracticeSerializer()
usages = PracticeSerializer(many=True) # Rando v1 compat
accessibilities = AccessibilitySerializer(many=True)
web_links = WebLinkSerializer(many=True)
labels = LabelTrekSerializer(many=True)
relationships = TrekRelationshipSerializer(many=True, source='published_relationships')
treks = CloseTrekSerializer(many=True, source='published_treks')
source = RecordSourceSerializer(many=True)
portal = TargetPortalSerializer(many=True)
children = rest_serializers.ReadOnlyField(source='children_id')
parents = rest_serializers.ReadOnlyField(source='parents_id')
previous = rest_serializers.ReadOnlyField(source='previous_id')
next = rest_serializers.ReadOnlyField(source='next_id')
reservation_system = rest_serializers.ReadOnlyField(source='reservation_system.name', default="")
# Idea: use rest-framework-gis
parking_location = rest_serializers.SerializerMethodField()
points_reference = rest_serializers.SerializerMethodField()
gpx = rest_serializers.SerializerMethodField('get_gpx_url')
kml = rest_serializers.SerializerMethodField('get_kml_url')
structure = StructureSerializer()
# For consistency with touristic contents
type2 = TypeSerializer(source='accessibilities', many=True)
category = rest_serializers.SerializerMethodField()
# Method called to retrieve relevant pictures based on settings
pictures = rest_serializers.SerializerMethodField()
def __init__(self, instance=None, *args, **kwargs):
# duplicate each trek for each one of its accessibilities
if instance and hasattr(instance, '__iter__') and settings.SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY:
treks = []
for trek in instance:
treks.append(trek)
for accessibility in trek.accessibilities.all():
clone = copy.copy(trek)
clone.accessibility = accessibility
treks.append(clone)
instance = treks
super(TrekSerializer, self).__init__(instance, *args, **kwargs)
if settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE:
del self.fields['practice']
if settings.SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY:
del self.fields['type2']
if 'geotrek.tourism' in settings.INSTALLED_APPS:
from geotrek.tourism import serializers as tourism_serializers
self.fields['information_desks'] = tourism_serializers.InformationDeskSerializer(many=True)
self.fields['touristic_contents'] = tourism_serializers.CloseTouristicContentSerializer(many=True, source='published_touristic_contents')
self.fields['touristic_events'] = tourism_serializers.CloseTouristicEventSerializer(many=True, source='published_touristic_events')
if 'geotrek.diving' in settings.INSTALLED_APPS:
from geotrek.diving.serializers import CloseDiveSerializer
self.fields['dives'] = CloseDiveSerializer(many=True, source='published_dives')
class Meta:
model = trekking_models.Trek
id_field = 'id' # By default on this model it's topo_object = OneToOneField(parent_link=True)
fields = ('id', 'departure', 'arrival', 'duration', 'duration_pretty',
'description', 'description_teaser', 'networks', 'advice',
'ambiance', 'difficulty', 'information_desks', 'themes',
'labels', 'practice', 'accessibilities', 'usages', 'access', 'route',
'public_transport', 'advised_parking', 'web_links',
'disabled_infrastructure', 'parking_location', 'relationships',
'points_reference', 'gpx', 'kml', 'source', 'portal',
'type2', 'category', 'structure', 'treks', 'reservation_id', 'reservation_system',
'children', 'parents', 'previous', 'next') + \
AltimetrySerializerMixin.Meta.fields + \
ZoningSerializerMixin.Meta.fields + \
PublishableSerializerMixin.Meta.fields + \
PicturesSerializerMixin.Meta.fields
def get_pictures(self, obj):
pictures_list = []
pictures_list.extend(obj.serializable_pictures)
if settings.TREK_WITH_POIS_PICTURES:
for poi in obj.published_pois:
pictures_list.extend(poi.serializable_pictures)
return pictures_list
def get_parking_location(self, obj):
if not obj.parking_location:
return None
point = obj.parking_location.transform(settings.API_SRID, clone=True)
return [round(point.x, 7), round(point.y, 7)]
def get_points_reference(self, obj):
if not obj.points_reference:
return None
geojson = obj.points_reference.transform(settings.API_SRID, clone=True).geojson
return json.loads(geojson)
def get_gpx_url(self, obj):
return reverse('trekking:trek_gpx_detail', kwargs={'lang': get_language(), 'pk': obj.pk, 'slug': obj.slug})
def get_kml_url(self, obj):
return reverse('trekking:trek_kml_detail', kwargs={'lang': get_language(), 'pk': obj.pk, 'slug': obj.slug})
def get_category(self, obj):
if settings.SPLIT_TREKS_CATEGORIES_BY_ITINERANCY and obj.children.exists():
data = {
'id': 'I',
'label': _("Itinerancy"),
'pictogram': '/static/trekking/itinerancy.svg',
# Translators: This is a slug (without space, accent or special char)
'slug': _('itinerancy'),
}
elif settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE and obj.practice:
data = {
'id': obj.practice.prefixed_id,
'label': obj.practice.name,
'pictogram': obj.practice.get_pictogram_url(),
'slug': obj.practice.slug,
}
else:
data = {
'id': trekking_models.Practice.id_prefix,
'label': _("Hike"),
'pictogram': '/static/trekking/trek.svg',
# Translators: This is a slug (without space, accent or special char)
'slug': _('trek'),
}
if settings.SPLIT_TREKS_CATEGORIES_BY_ITINERANCY and obj.children.exists():
data['order'] = settings.ITINERANCY_CATEGORY_ORDER
elif settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE:
data['order'] = obj.practice and obj.practice.order
else:
data['order'] = settings.TREK_CATEGORY_ORDER
if not settings.SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY:
data['type2_label'] = obj._meta.get_field('accessibilities').verbose_name
return data
class TrekGeojsonSerializer(GeoFeatureModelSerializer, TrekSerializer):
# Annotated geom field with API_SRID
api_geom = rest_gis_fields.GeometryField(read_only=True, precision=7)
class Meta(TrekSerializer.Meta):
geo_field = 'api_geom'
fields = TrekSerializer.Meta.fields + ('api_geom', )
class POITypeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.POIType
fields = ('id', 'pictogram', 'label')
class ClosePOISerializer(TranslatedModelSerializer):
type = POITypeSerializer()
class Meta:
model = trekking_models.Trek
fields = ('id', 'slug', 'name', 'type')
class POISerializer(PublishableSerializerMixin, PicturesSerializerMixin,
ZoningSerializerMixin, TranslatedModelSerializer):
type = POITypeSerializer()
structure = StructureSerializer()
class Meta:
model = trekking_models.Trek
id_field = 'id' # By default on this model it's topo_object = OneToOneField(parent_link=True)
fields = ('id', 'description', 'type',) + \
('min_elevation', 'max_elevation', 'structure') + \
ZoningSerializerMixin.Meta.fields + \
PublishableSerializerMixin.Meta.fields + \
PicturesSerializerMixin.Meta.fields
class POIGeojsonSerializer(GeoFeatureModelSerializer, POISerializer):
# Annotated geom field with API_SRID
api_geom = rest_gis_fields.GeometryField(read_only=True, precision=7)
class Meta(POISerializer.Meta):
geo_field = 'api_geom'
fields = POISerializer.Meta.fields + ('api_geom', )
class ServiceTypeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.ServiceType
fields = ('id', 'pictogram', 'name')
class ServiceSerializer(rest_serializers.ModelSerializer):
type = ServiceTypeSerializer()
structure = StructureSerializer()
class Meta:
model = trekking_models.Service
id_field = 'id' # By default on this model it's topo_object = OneToOneField(parent_link=True)
fields = ('id', 'type', 'structure')
class ServiceGeojsonSerializer(GeoFeatureModelSerializer, ServiceSerializer):
# Annotated geom field with API_SRID
api_geom = rest_gis_fields.GeometryField(read_only=True, precision=7)
class Meta(ServiceSerializer.Meta):
geo_field = 'api_geom'
fields = ServiceSerializer.Meta.fields + ('api_geom', )
def timestamp(dt):
epoch = make_aware(datetime.datetime(1970, 1, 1), utc)
return str(int((dt - epoch).total_seconds()))
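# Worked example (hypothetical input, not from the original module): for an aware datetime
# of 1970-01-01 00:01:00 UTC this returns the string "60", i.e. whole seconds since the Unix
# epoch, which is how the date_creation/date_modification attributes are filled in below.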
class CirkwiPOISerializer(object):
def __init__(self, request, stream):
self.xml = SimplerXMLGenerator(stream, 'utf8')
self.request = request
self.stream = stream
def serialize_field(self, name, value, attrs={}):
if not value and not attrs:
return
value = str(value)
self.xml.startElement(name, attrs)
if '<' in value or u'>' in value or '&' in value:
self.stream.write('<![CDATA[%s]]>' % value)
else:
self.xml.characters(value)
self.xml.endElement(name)
def serialize_medias(self, request, pictures):
if not pictures:
return
self.xml.startElement('medias', {})
self.xml.startElement('images', {})
for picture in pictures[:10]:
self.xml.startElement('image', {})
if picture['legend']:
self.serialize_field('legende', picture['legend'])
self.serialize_field('url', request.build_absolute_uri(picture['url']))
if picture['author']:
self.serialize_field('credit', picture['author'])
self.xml.endElement('image')
self.xml.endElement('images')
self.xml.endElement('medias')
def serialize_pois(self, pois):
if not pois:
return
for poi in pois:
self.xml.startElement('poi', {
'date_creation': timestamp(poi.date_insert),
'date_modification': timestamp(poi.date_update),
'id_poi': str(poi.pk),
})
if poi.type.cirkwi:
self.xml.startElement('categories', {})
self.serialize_field('categorie', str(poi.type.cirkwi.eid), {'nom': poi.type.cirkwi.name})
self.xml.endElement('categories')
orig_lang = translation.get_language()
self.xml.startElement('informations', {})
for lang in poi.published_langs:
translation.activate(lang)
self.xml.startElement('information', {'langue': lang})
self.serialize_field('titre', poi.name)
self.serialize_field('description', plain_text(poi.description))
self.serialize_medias(self.request, poi.serializable_pictures)
self.xml.endElement('information')
translation.activate(orig_lang)
self.xml.endElement('informations')
self.xml.startElement('adresse', {})
self.xml.startElement('position', {})
coords = poi.geom.transform(4326, clone=True).coords
self.serialize_field('lat', round(coords[1], 7))
self.serialize_field('lng', round(coords[0], 7))
self.xml.endElement('position')
self.xml.endElement('adresse')
self.xml.endElement('poi')
def serialize(self, pois):
self.xml.startDocument()
self.xml.startElement('pois', {'version': '2'})
self.serialize_pois(pois)
self.xml.endElement('pois')
self.xml.endDocument()
class CirkwiTrekSerializer(CirkwiPOISerializer):
ADDITIONNAL_INFO = ('departure', 'arrival', 'ambiance', 'access', 'disabled_infrastructure',
'advised_parking', 'public_transport', 'advice')
def __init__(self, request, stream, get_params=None):
super(CirkwiTrekSerializer, self).__init__(request, stream)
self.request = request
self.exclude_pois = get_params.get('withoutpois', None)
def serialize_additionnal_info(self, trek, name):
value = getattr(trek, name)
if not value:
return
value = plain_text(value)
self.xml.startElement('information_complementaire', {})
self.serialize_field('titre', trek._meta.get_field(name).verbose_name)
self.serialize_field('description', value)
self.xml.endElement('information_complementaire')
def serialize_locomotions(self, trek):
attrs = {}
if trek.practice and trek.practice.cirkwi:
attrs['type'] = trek.practice.cirkwi.name
attrs['id_locomotion'] = str(trek.practice.cirkwi.eid)
if trek.difficulty and trek.difficulty.cirkwi_level:
attrs['difficulte'] = str(trek.difficulty.cirkwi_level)
if trek.duration:
attrs['duree'] = str(int(trek.duration * 3600))
if attrs:
self.xml.startElement('locomotions', {})
self.serialize_field('locomotion', '', attrs)
self.xml.endElement('locomotions')
def serialize_description(self, trek):
description = trek.description_teaser
if description and trek.description:
description += '\n\n'
description += trek.description
if description:
self.serialize_field('description', plain_text(description))
def serialize_tags(self, trek):
tag_ids = list(trek.themes.filter(cirkwi_id__isnull=False).values_list('cirkwi_id', flat=True))
tag_ids += trek.accessibilities.filter(cirkwi_id__isnull=False).values_list('cirkwi_id', flat=True)
if trek.difficulty and trek.difficulty.cirkwi_id:
tag_ids.append(trek.difficulty.cirkwi_id)
if tag_ids:
self.xml.startElement('tags_publics', {})
for tag in CirkwiTag.objects.filter(id__in=tag_ids):
self.serialize_field('tag_public', '', {'id': str(tag.eid), 'nom': tag.name})
self.xml.endElement('tags_publics')
# TODO: parking location (POI?), points_reference
def serialize(self, treks):
self.xml.startDocument()
self.xml.startElement('circuits', {'version': '2'})
for trek in treks:
self.xml.startElement('circuit', {
'date_creation': timestamp(trek.date_insert),
'date_modification': timestamp(trek.date_update),
'id_circuit': str(trek.pk),
})
orig_lang = translation.get_language()
self.xml.startElement('informations', {})
for lang in trek.published_langs:
translation.activate(lang)
self.xml.startElement('information', {'langue': lang})
self.serialize_field('titre', trek.name)
self.serialize_description(trek)
self.serialize_medias(self.request, trek.serializable_pictures)
if any([getattr(trek, name) for name in self.ADDITIONNAL_INFO]):
self.xml.startElement('informations_complementaires', {})
for name in self.ADDITIONNAL_INFO:
self.serialize_additionnal_info(trek, name)
self.xml.endElement('informations_complementaires')
self.serialize_tags(trek)
self.xml.endElement('information')
translation.activate(orig_lang)
self.xml.endElement('informations')
self.serialize_field('distance', int(trek.length))
self.serialize_locomotions(trek)
kml_url = reverse('trekking:trek_kml_detail',
kwargs={'lang': get_language(), 'pk': trek.pk, 'slug': trek.slug})
self.serialize_field('fichier_trace', '', {'url': self.request.build_absolute_uri(kml_url)})
if not self.exclude_pois:
if trek.published_pois:
self.xml.startElement('pois', {})
self.serialize_pois(trek.published_pois.annotate(transformed_geom=Transform('geom', 4326)))
self.xml.endElement('pois')
self.xml.endElement('circuit')
self.xml.endElement('circuits')
self.xml.endDocument()
| 41.624542
| 149
| 0.659568
|
81a5449479574f681461a1b6e881ba84c4f53baa
| 4,615
|
py
|
Python
|
semeval_eval.py
|
jfarrugia-uom/hyperstar
|
896db10da2506e5144b99361dfb43609edf05012
|
[
"MIT"
] | null | null | null |
semeval_eval.py
|
jfarrugia-uom/hyperstar
|
896db10da2506e5144b99361dfb43609edf05012
|
[
"MIT"
] | null | null | null |
semeval_eval.py
|
jfarrugia-uom/hyperstar
|
896db10da2506e5144b99361dfb43609edf05012
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import numpy as np
class HypernymEvaluation:
def __init__(self, dataset):
self.dataset = dataset
def convert_hypernyms_to_one_line(self):
#ordered_queries = sorted(list(set(self.dataset[0])))
ordered_queries = sorted(list(set([x for (x,y) in self.dataset])))
one_line = {}
for w in ordered_queries:
word_hypernyms = [h for q, h in self.dataset if q == w]
one_line[w] = word_hypernyms
return one_line
# taken from task_scorer.py provided with shared task resources
def mean_reciprocal_rank(self, r):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
r = np.asarray(r).nonzero()[0]
return 1. / (r[0] + 1) if r.size else 0.
def precision_at_k(self, r, k, n):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return (np.mean(r)*k)/min(k,n)
# Modified from the first version. Now the gold elements are taken into account
def average_precision(self, r,n):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
out = [self.precision_at_k(r, k + 1, n) for k in range(r.size)]
#Modified from the first version (removed "if r[k]"). All elements (zero and nonzero) are taken into account
if not out:
return 0.
return np.mean(out)
def mean_average_precision(self, r, n):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return self.average_precision(r,n)
# predictions is a dictionary whereby key is query term and value is a list of ranked hypernym predictions
def get_evaluation_scores(self, predictions):
all_scores = []
scores_names = ['MRR', 'MAP', 'P@1', 'P@5', 'P@10']
for query, gold_hyps in self.convert_hypernyms_to_one_line().items():
avg_pat1 = []
avg_pat2 = []
avg_pat3 = []
pred_hyps = predictions[query]
gold_hyps_n = len(gold_hyps)
r = [0 for i in range(15)]
for j in range(len(pred_hyps)):
# I believe it's not fair to bias evaluation on how many hypernyms were found in gold set
# if anything a shorter list (ex. because a hypernym is very particular) will already make
# it harder for a match to be found but if system returns correct hypernym in second place
# why should it be ignored?
if j < gold_hyps_n:
pred_hyp = pred_hyps[j]
if pred_hyp in gold_hyps:
r[j] = 1
avg_pat1.append(self.precision_at_k(r,1,gold_hyps_n))
avg_pat2.append(self.precision_at_k(r,5,gold_hyps_n))
avg_pat3.append(self.precision_at_k(r,10,gold_hyps_n))
mrr_score_numb = self.mean_reciprocal_rank(r)
map_score_numb = self.mean_average_precision(r,gold_hyps_n)
avg_pat1_numb = sum(avg_pat1)/len(avg_pat1)
avg_pat2_numb = sum(avg_pat2)/len(avg_pat2)
avg_pat3_numb = sum(avg_pat3)/len(avg_pat3)
score_results = [mrr_score_numb, map_score_numb, avg_pat1_numb, avg_pat2_numb, avg_pat3_numb]
all_scores.append(score_results)
return scores_names, all_scores
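# Minimal usage sketch (not part of the original script): the dataset is a list of
# (query, gold_hypernym) pairs and predictions map each query to a ranked candidate list.
# The words below are hypothetical toy data chosen only to exercise the metrics.
if __name__ == '__main__':
    toy_dataset = [('dog', 'animal'), ('dog', 'mammal'), ('car', 'vehicle')]
    toy_predictions = {'dog': ['animal', 'plant'], 'car': ['vehicle']}
    evaluator = HypernymEvaluation(toy_dataset)
    names, scores = evaluator.get_evaluation_scores(toy_predictions)
    # Average each metric over the queries and print it next to its name.
    for name, value in zip(names, np.mean(np.array(scores), axis=0)):
        print(name, round(float(value), 3))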
| 40.840708
| 116
| 0.582665
|
53478a031915d03ad1f0d4ee1f0352f1d48a497b
| 1,710
|
py
|
Python
|
fast_campus_craw_project/crawler/crawler/spiders/spider.py
|
Taebyoung/crawling_season_1
|
afbf2c7a1c94625f41e78bcac596db103bef1aaa
|
[
"MIT"
] | 1
|
2019-12-13T06:06:14.000Z
|
2019-12-13T06:06:14.000Z
|
fast_campus_craw_project/crawler/crawler/spiders/spider.py
|
Taebyoung/crawling_season_1
|
afbf2c7a1c94625f41e78bcac596db103bef1aaa
|
[
"MIT"
] | null | null | null |
fast_campus_craw_project/crawler/crawler/spiders/spider.py
|
Taebyoung/crawling_season_1
|
afbf2c7a1c94625f41e78bcac596db103bef1aaa
|
[
"MIT"
] | null | null | null |
import scrapy
import requests
import json
from crawler.items import CrawlerItem
class Spider(scrapy.Spider):
name = "KBO"
def start_requests(self):
year = ["2017", "2018", "2019"]
month = ["03","04","05", "06", "07", "08", "09", "10"]
team = ["SK", "HH", "WO", "HT", "SS", "LT", "OB", "LG","KT","NC"]
for self.y in year:
for m in month:
for t in team:
url = "https://www.koreabaseball.com/ws/Schedule.asmx/GetScheduleList?leId=1&srIdList=0%2C9&seasonId={}&gameMonth={}&teamId={}".format(self.y, m, t)
yield scrapy.Request(url, callback=self.match_parse)
def match_parse(self, response):
item = CrawlerItem()
for n in range(0,31):
try:
item["year"] = self.y
item["dates"] = json.loads(response.body)["rows"][n]['row'][0]['Text'] #날짜
time = json.loads(response.body)["rows"][n]['row'][1]['Text'] #시간
item["times"] = time.replace("<b>", "").replace("</b>", "")
                result = json.loads(response.body)["rows"][n]['row'][2]['Text'] # game result
item["results"] = result.replace('<span class="win">',' win ').replace('<span class="lose">',' lose ').replace('<span class="same">',' same ').replace('</span><span>',' ').replace('</span></em><span>',' ').replace('<span>','').replace('</span>','').replace('<em>','').replace('</em>','')
item["parks"] = json.loads(response.body)["rows"][n]['row'][7]['Text'] #구장
item["etcs"] = json.loads(response.body)["rows"][n]['row'][8]['Text'] #비고
yield item
except:
break
| 48.857143
| 303
| 0.506433
|
dc6766ab00766cc2091f3b8dd67e6813a4fd8aef
| 5,890
|
py
|
Python
|
CookieTTS/utils/torchmoji/filter_utils.py
|
AstraliteHeart/cookietts
|
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
|
[
"BSD-3-Clause"
] | 25
|
2020-07-07T20:07:41.000Z
|
2021-12-17T11:27:36.000Z
|
CookieTTS/utils/torchmoji/filter_utils.py
|
AstraliteHeart/cookietts
|
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
|
[
"BSD-3-Clause"
] | 26
|
2020-07-04T00:06:25.000Z
|
2022-02-10T03:28:35.000Z
|
CookieTTS/utils/torchmoji/filter_utils.py
|
AstraliteHeart/cookietts
|
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
|
[
"BSD-3-Clause"
] | 11
|
2020-07-02T21:39:59.000Z
|
2022-01-17T22:09:46.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
import sys
import re
import string
import emoji
from itertools import groupby
import numpy as np
from CookieTTS.utils.torchmoji.tokenizer import RE_MENTION, RE_URL
from CookieTTS.utils.torchmoji.global_variables import SPECIAL_TOKENS
try:
unichr # Python 2
except NameError:
unichr = chr # Python 3
AtMentionRegex = re.compile(RE_MENTION)
urlRegex = re.compile(RE_URL)
# from http://bit.ly/2rdjgjE (UTF-8 encodings and Unicode chars)
VARIATION_SELECTORS = [ '\ufe00',
'\ufe01',
'\ufe02',
'\ufe03',
'\ufe04',
'\ufe05',
'\ufe06',
'\ufe07',
'\ufe08',
'\ufe09',
'\ufe0a',
'\ufe0b',
'\ufe0c',
'\ufe0d',
'\ufe0e',
'\ufe0f']
# from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
ALL_CHARS = (unichr(i) for i in range(sys.maxunicode))
CONTROL_CHARS = ''.join(map(unichr, list(range(0,32)) + list(range(127,160))))
CONTROL_CHAR_REGEX = re.compile('[%s]' % re.escape(CONTROL_CHARS))
def is_special_token(word):
equal = False
for spec in SPECIAL_TOKENS:
if word == spec:
equal = True
break
return equal
def mostly_english(words, english, pct_eng_short=0.5, pct_eng_long=0.6, ignore_special_tokens=True, min_length=2):
""" Ensure text meets threshold for containing English words """
n_words = 0
n_english = 0
if english is None:
return True, 0, 0
for w in words:
if len(w) < min_length:
continue
if punct_word(w):
continue
if ignore_special_tokens and is_special_token(w):
continue
n_words += 1
if w in english:
n_english += 1
if n_words < 2:
return True, n_words, n_english
if n_words < 5:
valid_english = n_english >= n_words * pct_eng_short
else:
valid_english = n_english >= n_words * pct_eng_long
return valid_english, n_words, n_english
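# Worked example (toy vocabulary, purely illustrative): with english = {'the', 'cat'},
# mostly_english(['the', 'cat', 'xyzzy'], english) counts 3 scorable words of which 2 are
# English; since n_words < 5 the short threshold applies and 2 >= 3 * 0.5, so it returns
# (True, 3, 2).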
def correct_length(words, min_words, max_words, ignore_special_tokens=True):
""" Ensure text meets threshold for containing English words
and that it's within the min and max words limits. """
if min_words is None:
min_words = 0
if max_words is None:
max_words = 99999
n_words = 0
for w in words:
if punct_word(w):
continue
if ignore_special_tokens and is_special_token(w):
continue
n_words += 1
valid = min_words <= n_words and n_words <= max_words
return valid
def punct_word(word, punctuation=string.punctuation):
return all([True if c in punctuation else False for c in word])
def load_non_english_user_set():
non_english_user_set = set(np.load('uids.npz')['data'])
return non_english_user_set
def non_english_user(userid, non_english_user_set):
neu_found = int(userid) in non_english_user_set
return neu_found
def separate_emojis_and_text(text):
emoji_chars = []
non_emoji_chars = []
for c in text:
if c in emoji.UNICODE_EMOJI:
emoji_chars.append(c)
else:
non_emoji_chars.append(c)
return ''.join(emoji_chars), ''.join(non_emoji_chars)
def extract_emojis(text, wanted_emojis):
text = remove_variation_selectors(text)
return [c for c in text if c in wanted_emojis]
def remove_variation_selectors(text):
""" Remove styling glyph variants for Unicode characters.
For instance, remove skin color from emojis.
"""
for var in VARIATION_SELECTORS:
text = text.replace(var, '')
return text
def shorten_word(word):
""" Shorten groupings of 3+ identical consecutive chars to 2, e.g. '!!!!' --> '!!'
"""
# only shorten ASCII words
try:
word.decode('ascii')
except (UnicodeDecodeError, UnicodeEncodeError, AttributeError) as e:
return word
# must have at least 3 char to be shortened
if len(word) < 3:
return word
# find groups of 3+ consecutive letters
letter_groups = [list(g) for k, g in groupby(word)]
triple_or_more = [''.join(g) for g in letter_groups if len(g) >= 3]
if len(triple_or_more) == 0:
return word
# replace letters to find the short word
short_word = word
for trip in triple_or_more:
short_word = short_word.replace(trip, trip[0]*2)
return short_word
def detect_special_tokens(word):
try:
int(word)
word = SPECIAL_TOKENS[4]
except ValueError:
if AtMentionRegex.findall(word):
word = SPECIAL_TOKENS[2]
elif urlRegex.findall(word):
word = SPECIAL_TOKENS[3]
return word
def process_word(word):
""" Shortening and converting the word to a special token if relevant.
"""
word = shorten_word(word)
word = detect_special_tokens(word)
return word
def remove_control_chars(text):
return CONTROL_CHAR_REGEX.sub('', text)
def convert_nonbreaking_space(text):
# ugly hack handling non-breaking space no matter how badly it's been encoded in the input
for r in ['\\\\xc2', '\\xc2', '\xc2', '\\\\xa0', '\\xa0', '\xa0']:
text = text.replace(r, ' ')
return text
def convert_linebreaks(text):
    # ugly hack handling line breaks no matter how badly they have been encoded in the input
# space around to ensure proper tokenization
for r in ['\\\\n', '\\n', '\n', '\\\\r', '\\r', '\r', '<br>']:
text = text.replace(r, ' ' + SPECIAL_TOKENS[5] + ' ')
return text
| 30.205128
| 114
| 0.615959
|
43348e3b2c545b50b18b1fe875ed23f49caec133
| 1,073
|
py
|
Python
|
ngraph/python/src/ngraph/exceptions.py
|
szabi-luxonis/openvino
|
c8dd831fc3ba68a256ab47edb4f6bf3cb5e804be
|
[
"Apache-2.0"
] | 2
|
2020-11-18T14:14:06.000Z
|
2020-11-28T04:55:57.000Z
|
ngraph/python/src/ngraph/exceptions.py
|
szabi-luxonis/openvino
|
c8dd831fc3ba68a256ab47edb4f6bf3cb5e804be
|
[
"Apache-2.0"
] | 30
|
2020-11-13T11:44:07.000Z
|
2022-02-21T13:03:16.000Z
|
ngraph/python/src/ngraph/exceptions.py
|
szabi-luxonis/openvino
|
c8dd831fc3ba68a256ab47edb4f6bf3cb5e804be
|
[
"Apache-2.0"
] | 1
|
2020-12-18T15:47:45.000Z
|
2020-12-18T15:47:45.000Z
|
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""! ngraph exceptions hierarchy. All exceptions are descendants of NgraphError."""
class NgraphError(Exception):
"""! Base class for Ngraph exceptions."""
class UserInputError(NgraphError):
"""! User provided unexpected input."""
class NgraphTypeError(NgraphError, TypeError):
"""! Type mismatch error."""
| 37
| 83
| 0.634669
|
f8ace892ae24814e4b82aa9e1fe62f7749e39978
| 1,212
|
py
|
Python
|
arduino/portable/sketchbook/libraries/RF24/pyRF24/setup.py
|
devshop2019/mixlyTest
|
bb92771aca2d5d801510658a70a13f4b548a43aa
|
[
"Apache-2.0"
] | 118
|
2019-03-27T02:15:59.000Z
|
2022-03-18T16:42:42.000Z
|
arduino/portable/sketchbook/libraries/RF24/pyRF24/setup.py
|
devshop2019/mixlyTest
|
bb92771aca2d5d801510658a70a13f4b548a43aa
|
[
"Apache-2.0"
] | 50
|
2015-02-27T13:21:05.000Z
|
2016-01-11T01:03:42.000Z
|
arduino/portable/sketchbook/libraries/RF24/pyRF24/setup.py
|
devshop2019/mixlyTest
|
bb92771aca2d5d801510658a70a13f4b548a43aa
|
[
"Apache-2.0"
] | 58
|
2019-04-16T06:52:45.000Z
|
2022-03-08T01:57:08.000Z
|
#!/usr/bin/env python
import os
import sys
import setuptools
import crossunixccompiler
version = ''
def process_configparams():
global version
with open('../Makefile.inc') as f:
config_lines = f.read().splitlines()
cflags = os.getenv("CFLAGS", "")
for line in config_lines:
identifier, value = line.split('=', 1)
if identifier == "CPUFLAGS":
cflags += " " + value
elif identifier == "HEADER_DIR":
cflags += " -I" + os.path.dirname(value)
elif identifier == "LIB_DIR":
cflags += " -L" + value
elif identifier == "LIB_VERSION":
version = value
elif identifier in ("CC", "CXX"):
os.environ[identifier] = value
os.environ["CFLAGS"] = cflags
if sys.version_info >= (3,):
BOOST_LIB = 'boost_python3'
else:
BOOST_LIB = 'boost_python'
process_configparams()
crossunixccompiler.register()
module_RF24 = setuptools.Extension('RF24',
libraries=['rf24', BOOST_LIB],
sources=['pyRF24.cpp'])
setuptools.setup(name='RF24',
version=version,
ext_modules=[module_RF24])
| 24.734694
| 65
| 0.567657
|
bbb008e64bca47867b41d28e9fdc9068bdb04574
| 2,546
|
py
|
Python
|
tests/integrationTests/tests/cpp_buggy_custom/__init__.py
|
muarachmann/Submitty
|
86cea3d3441419089b1a3058d01c663e6807294c
|
[
"BSD-3-Clause"
] | 3
|
2020-07-27T16:23:09.000Z
|
2022-01-07T16:07:31.000Z
|
tests/integrationTests/tests/cpp_buggy_custom/__init__.py
|
muarachmann/Submitty
|
86cea3d3441419089b1a3058d01c663e6807294c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integrationTests/tests/cpp_buggy_custom/__init__.py
|
muarachmann/Submitty
|
86cea3d3441419089b1a3058d01c663e6807294c
|
[
"BSD-3-Clause"
] | null | null | null |
# Necessary imports. Provides library functions to ease writing tests.
from lib import prebuild, testcase, SUBMITTY_INSTALL_DIR
import subprocess
import os
import glob
import shutil
############################################################################
# COPY THE ASSIGNMENT FROM THE SAMPLE ASSIGNMENTS DIRECTORIES
SAMPLE_ASSIGNMENT_CONFIG = SUBMITTY_INSTALL_DIR + "/more_autograding_examples/cpp_buggy_custom/config"
SAMPLE_SUBMISSIONS = SUBMITTY_INSTALL_DIR + "/more_autograding_examples/cpp_buggy_custom/submissions"
@prebuild
def initialize(test):
try:
os.mkdir(os.path.join(test.testcase_path, "assignment_config"))
except OSError:
pass
try:
data_path = os.path.join(test.testcase_path, "data")
if os.path.isdir(data_path):
shutil.rmtree(data_path)
os.mkdir(data_path)
except OSError:
pass
try:
os.mkdir(os.path.join(test.testcase_path, "build"))
except OSError:
pass
try:
os.mkdir(os.path.join(test.testcase_path, "build/custom_validation_code"))
except OSError:
pass
subprocess.call(["cp",
os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "config.json"),
os.path.join(test.testcase_path, "assignment_config")])
subprocess.call(["cp",
os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "instructor_CMakeLists.txt"),
os.path.join(test.testcase_path, "build")])
subprocess.call(["cp",
os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "custom_validation_code", "grader.cpp"),
os.path.join(test.testcase_path, "build/custom_validation_code")])
############################################################################
def cleanup(test):
subprocess.call(["rm"] + ["-rf"] +
glob.glob(os.path.join(test.testcase_path, "data", "test*")))
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data", "results*")))
@testcase
def correct(test):
cleanup(test)
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data/", "*.cpp")))
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "correct.cpp"),
os.path.join(test.testcase_path, "data/")])
test.run_compile()
test.run_run()
test.run_validator()
test.diff("grade.txt","grade.txt_correct","-b")
test.json_diff("results.json","results.json_correct")
| 35.859155
| 107
| 0.597015
|
f9bd899f3f44eaf642db9bf4f32b4efcc534187a
| 313
|
py
|
Python
|
lab3/palindrome.py
|
Dovydas-Kr/dt211c-cloud-repo
|
cc1e40f68da5048d9ceeea1617dd08293e013ac0
|
[
"MIT"
] | null | null | null |
lab3/palindrome.py
|
Dovydas-Kr/dt211c-cloud-repo
|
cc1e40f68da5048d9ceeea1617dd08293e013ac0
|
[
"MIT"
] | null | null | null |
lab3/palindrome.py
|
Dovydas-Kr/dt211c-cloud-repo
|
cc1e40f68da5048d9ceeea1617dd08293e013ac0
|
[
"MIT"
] | null | null | null |
# Program that checks if a string is a palindrome.
import string
#Asks for input
word = raw_input("Enter a string: ")
#changes string to lower case
word1 = word.lower()
#reverses string
word2 = reversed(word1)
#Checks if strings are the same
if list(word1) == list(word2):
print("True")
else:
print("False")
| 20.866667
| 48
| 0.71885
|
b79db24da371797a985afbe7993ff2334ee38df9
| 271
|
py
|
Python
|
sources/controller/__init__.py
|
Groomsha/lan-map
|
1c30819470f43f8521e98eb75c70da23939f8f06
|
[
"Apache-2.0"
] | null | null | null |
sources/controller/__init__.py
|
Groomsha/lan-map
|
1c30819470f43f8521e98eb75c70da23939f8f06
|
[
"Apache-2.0"
] | null | null | null |
sources/controller/__init__.py
|
Groomsha/lan-map
|
1c30819470f43f8521e98eb75c70da23939f8f06
|
[
"Apache-2.0"
] | null | null | null |
from .main_window.main_window_controller import *
from .new_device_window.new_device_controller import *
from .new_device_window.save_data_new_device import *
from .new_device_window.button_new_device import *
from .new_device_window.widgets_control_new_device import *
| 38.714286
| 59
| 0.867159
|
275949a93785cf146900b7939bde60a58e77e665
| 6,416
|
py
|
Python
|
danksales/danksales.py
|
OofChair/AndyCogs
|
0ccc6c3eba6f66051a9acf85fee765aae62c985b
|
[
"MIT"
] | 8
|
2021-01-26T19:44:13.000Z
|
2021-08-03T00:11:39.000Z
|
danksales/danksales.py
|
OofChair/AndyCogs
|
0ccc6c3eba6f66051a9acf85fee765aae62c985b
|
[
"MIT"
] | 6
|
2021-03-02T16:59:40.000Z
|
2021-07-21T06:26:00.000Z
|
danksales/danksales.py
|
OofChair/AndyCogs
|
0ccc6c3eba6f66051a9acf85fee765aae62c985b
|
[
"MIT"
] | 6
|
2021-02-11T20:35:10.000Z
|
2021-08-07T07:40:17.000Z
|
import discord
import re
from datetime import datetime
from redbot.core import commands, Config
from redbot.core.bot import Red
from typing import Optional
SHOP_REGEX = r"\*\*__LIGHTNING SALE__\*\* \(resets in (?P<time>[0-9,]+)m\) :[a-zA-Z0-9_]{2,32}: \*\*(?P<item>.*[a-zA-Z0-9_]{2,32})\*\* ─ \[(?P<price>[0-9,]+)\] \(\[\*\*\*(?P<percent>[0-9,]+)% OFF!\*\*\*\]\)\*(?P<description>\w.*)\*"
WEBHOOK_REGEX = r"\*\*(?P<item>.*[a-zA-Z0-9_]{2,32})\*\* ─ \[(?P<price>[0-9,]+)\] \(\[\*\*\*(?P<percent>[0-9,]+)% OFF!\*\*\*\]\)\*(?P<description>\w.*)\*"
class DankSales(commands.Cog):
"""Post sales and view stats about dankmemer item sales"""
def __init__(self, bot: Red):
self.bot = bot
self.config = Config.get_conf(self, 160805014090190130501014, True)
default_guild = {
"channel": None,
"pingrole": None,
"rate": 50,
}
default_global = {
"lastitem": None,
"lastpercent": None,
}
self.config.register_guild(**default_guild)
self.config.register_global(**default_global)
item_dict = {
"Alcohol": 818709704762851339,
}
def sub(self, match):
return f":{match.group('name')}:"
@commands.group(aliases=["danksales"])
@commands.mod_or_permissions(manage_guild=True)
async def danksale(self, ctx: commands.Context):
"""Set server settings for dankmemer shop sales"""
pass
@danksale.command()
async def channel(self, ctx, channel: Optional[discord.TextChannel] = None):
"""Set the channel to send shop sales to"""
if not channel:
await self.config.guild(ctx.guild).channel.clear()
await ctx.send("Cleared the channel")
else:
await self.config.guild(ctx.guild).channel.set(channel.id)
await ctx.send(f"Now sending dankmemer shop sales to {channel.mention}")
@danksale.command(aliases=["role"])
async def pingrole(
self, ctx: commands.Context, role: Optional[discord.Role] = None
):
"""Set the role to ping if the rate is over the configured rate"""
if not role:
await self.config.guild(ctx.guild).pingrole.clear()
await ctx.send("No longer using a pingrole")
else:
await self.config.guild(ctx.guild).pingrole.set(role.id)
await ctx.send(f"I will now ping `{role.name}` for shop sales")
@danksale.command()
async def rate(self, ctx: commands.Context, rate: int):
"""Set the rate for the bot to ping the pingrole for sales"""
if rate <= 0 or rate >= 90:
return await ctx.send("The rate should be above 1 and less than 90")
await self.config.guild(ctx.guild).rate.set(rate)
await ctx.send("Updated the rate")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if not message.author.id == 270904126974590976 and message.webhook_id is None:
return "not dank"
if not message.embeds:
return "no embeds"
try:
if not "LIGHTNING SALE" in str(message.embeds[0].description):
if message.webhook_id is None:
return "Ligthing sale not in description"
except (IndexError, TypeError):
return "indexerror"
all_data = await self.config.all()
replace_list = [
"⏣ ",
"(https://www.youtube.com/watch?v=_BD140nCDps)",
"(https://www.youtube.com/watch?v=WPkMUU9tUqk)",
"\n",
]
filtered_message = str(message.embeds[0].description)
for item in replace_list:
filtered_message = filtered_message.replace(item, "")
filtered_message = re.sub(
"<(?P<animated>a?):(?P<name>[a-zA-Z0-9_]{2,32}):(?P<id>[0-9]{18,22})>",
self.sub,
filtered_message,
)
filtered_message = filtered_message.strip()
if message.webhook_id is None:
match = re.match(SHOP_REGEX, filtered_message)
else:
match = re.match(WEBHOOK_REGEX, filtered_message)
if not match:
return "no match"
if all_data["lastitem"] == match.group("item") and all_data["lastpercent"] == match.group("percent"):
return
await self.config.lastitem.set(match.group("item"))
await self.config.lastpercent.set(match.group("percent"))
all_guilds = await self.config.all_guilds()
for guild_id, data in all_guilds.items():
if data["channel"]:
e = discord.Embed(
title="LIGHTNING SALE",
color=discord.Color.blurple(),
description=f"**{match.group('item')}** ─ [⏣ {match.group('price')}](https://www.youtube.com/watch?v=_BD140nCDps)\n",
)
e.description += match.group("description")
channel = self.bot.get_channel(data["channel"])
content = ""
if int(match.group("percent")) >= data["rate"]:
role = data["pingrole"]
if not role:
pass
else:
guild = self.bot.get_guild(int(guild_id))
if not guild:
pass
else:
role = guild.get_role(data["pingrole"])
if not role:
pass
else:
content += f"{role.mention}: "
content += f"**{match.group('item')}** is on sale at **{match.group('percent')}%** off"
allowed_mentions = discord.AllowedMentions(roles=True)
try:
m = await channel.send(
content=content, embed=e, allowed_mentions=allowed_mentions
)
except (
discord.errors.Forbidden,
discord.NotFound,
discord.HTTPException,
):
pass
else:
try:
await m.publish()
except (discord.Forbidden, discord.HTTPException):
pass
| 38.419162
| 233
| 0.528834
|
742614aeaf8daad749d28551f1a159fb4f01f65a
| 12,273
|
py
|
Python
|
src/spec-2017/configs/run_spec.py
|
my569/gem5-resources
|
5788f1394a1894efec8e4784d37f473a743aa9f6
|
[
"MIT"
] | 1
|
2022-01-04T03:41:23.000Z
|
2022-01-04T03:41:23.000Z
|
src/spec-2017/configs/run_spec.py
|
my569/gem5-resources
|
5788f1394a1894efec8e4784d37f473a743aa9f6
|
[
"MIT"
] | null | null | null |
src/spec-2017/configs/run_spec.py
|
my569/gem5-resources
|
5788f1394a1894efec8e4784d37f473a743aa9f6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 The Regents of the University of California.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Lowe-Power, Ayaz Akram, Hoa Nguyen
""" Script to run a SPEC benchmark in full system mode with gem5.
Inputs:
* This script expects the following as arguments:
** kernel:
This is a positional argument specifying the path to
vmlinux.
** disk:
This is a positional argument specifying the path to the
disk image containing the installed SPEC benchmarks.
** cpu:
This is a positional argument specifying the name of the
detailed CPU model. The names of the available CPU models
are available in the getDetailedCPUModel(cpu_name) function.
The function should be modified to add new CPU models.
Currently, the available CPU models are:
- kvm: this is not a detailed CPU model, ideal for testing.
- o3: DerivO3CPU.
- atomic: AtomicSimpleCPU.
- timing: TimingSimpleCPU.
** benchmark:
This is a positional argument specifying the name of the
SPEC benchmark to run. Most SPEC benchmarks are available.
Please follow this link to check the availability of the
benchmarks. The working benchmark matrix is near the end
of the page:
(SPEC 2006) https://gem5art.readthedocs.io/en/latest/tutorials/spec2006-tutorial.html#appendix-i-working-spec-2006-benchmarks-x-cpu-model-table
(SPEC 2017) https://gem5art.readthedocs.io/en/latest/tutorials/spec2017-tutorial.html#appendix-i-working-spec-2017-benchmarks-x-cpu-model-table
** size:
This is a positional argument specifying the size of the
benchmark. The available sizes are: ref, test, train.
** --no-copy-logs:
This is an optional argument specifying the reports of
the benchmark run is not copied to the output folder.
The reports are copied by default.
** --allow-listeners:
This is an optional argument specifying gem5 to open GDB
listening ports. Usually, the ports are opened for debugging
purposes.
By default, the ports are off.
"""
import os
import sys
import m5
import m5.ticks
from m5.objects import *
import argparse
from system import *
def writeBenchScript(dir, benchmark_name, size, output_path):
"""
This method creates a script in dir which will be eventually
passed to the simulated system (to run a specific benchmark
at bootup).
"""
input_file_name = '{}/run_{}_{}'.format(dir, benchmark_name, size)
with open(input_file_name, "w") as f:
f.write('{} {} {}'.format(benchmark_name, size, output_path))
return input_file_name
def parse_arguments():
parser = argparse.ArgumentParser(description=
"gem5 config file to run SPEC benchmarks")
parser.add_argument("kernel", type = str, help = "Path to vmlinux")
parser.add_argument("disk", type = str,
help = "Path to the disk image containing SPEC benchmarks")
parser.add_argument("cpu", type = str, help = "Name of the detailed CPU")
parser.add_argument("mem_sys", type = str, help = "Name of the memory system")
parser.add_argument("benchmark", type = str,
help = "Name of the SPEC benchmark")
parser.add_argument("size", type = str,
help = "Available sizes: test, train, ref")
parser.add_argument("-l", "--no-copy-logs", default = False,
action = "store_true",
help = "Not to copy SPEC run logs to the host system;"
"Logs are copied by default")
parser.add_argument("-z", "--allow-listeners", default = False,
action = "store_true",
help = "Turn on ports;"
"The ports are off by default")
return parser.parse_args()
def getDetailedCPUModel(cpu_name):
'''
Return the CPU model corresponding to the cpu_name.
'''
available_models = {"kvm": X86KvmCPU,
"o3": DerivO3CPU,
"atomic": AtomicSimpleCPU,
"timing": TimingSimpleCPU
}
try:
available_models["FlexCPU"] = FlexCPU
except NameError:
# FlexCPU is not defined
pass
# https://docs.python.org/3/library/stdtypes.html#dict.get
# dict.get() returns None if the key does not exist
return available_models.get(cpu_name)
def getBenchmarkName(benchmark_name):
if benchmark_name.endswith("(base)"):
benchmark_name = benchmark_name[:-6]
return benchmark_name
def create_system(linux_kernel_path, disk_image_path, detailed_cpu_model, memory_system):
# create the system we are going to simulate
ruby_protocols = [ "MI_example", "MESI_Two_Level", "MOESI_CMP_directory"]
if memory_system == 'classic':
system = MySystem(kernel = linux_kernel_path,
disk = disk_image_path,
num_cpus = 1, # run the benchmark in a single thread
no_kvm = False,
TimingCPUModel = detailed_cpu_model)
elif memory_system in ruby_protocols:
system = MyRubySystem(kernel = linux_kernel_path,
disk = disk_image_path,
num_cpus = 1, # run the benchmark in a single thread
mem_sys = memory_system,
no_kvm = False,
TimingCPUModel = detailed_cpu_model)
else:
m5.fatal("Bad option for mem_sys, should be "
"{}, or 'classic'".format(', '.join(ruby_protocols)))
# For workitems to work correctly
# This will cause the simulator to exit simulation when the first work
# item is reached and when the first work item is finished.
system.work_begin_exit_count = 1
system.work_end_exit_count = 1
# set up the root SimObject and start the simulation
root = Root(full_system = True, system = system)
if system.getHostParallel():
# Required for running kvm on multiple host cores.
# Uses gem5's parallel event queue feature
# Note: The simulator is quite picky about this number!
root.sim_quantum = int(1e9) # 1 ms
return root, system
def boot_linux():
'''
Output 1: False if errors occur, True otherwise
Output 2: exit cause
'''
print("Booting Linux")
exit_event = m5.simulate()
exit_cause = exit_event.getCause()
success = exit_cause == "m5_exit instruction encountered"
if not success:
print("Error while booting linux: {}".format(exit_cause))
exit(1)
print("Booting done")
return success, exit_cause
def run_spec_benchmark():
'''
Output 1: False if errors occur, True otherwise
Output 2: exit cause
'''
print("Start running benchmark")
exit_event = m5.simulate()
exit_cause = exit_event.getCause()
success = exit_cause == "m5_exit instruction encountered"
if not success:
print("Error while running benchmark: {}".format(exit_cause))
exit(1)
print("Benchmark done")
return success, exit_cause
def copy_spec_logs():
'''
Output 1: False if errors occur, True otherwise
Output 2: exit cause
'''
print("Copying SPEC logs")
exit_event = m5.simulate()
exit_cause = exit_event.getCause()
success = exit_cause == "m5_exit instruction encountered"
if not success:
print("Error while copying SPEC log files: {}".format(exit_cause))
exit(1)
print("Copying done")
return success, exit_cause
if __name__ == "__m5_main__":
args = parse_arguments()
cpu_name = args.cpu
mem_sys = args.mem_sys
benchmark_name = getBenchmarkName(args.benchmark)
benchmark_size = args.size
linux_kernel_path = args.kernel
disk_image_path = args.disk
no_copy_logs = args.no_copy_logs
allow_listeners = args.allow_listeners
if not no_copy_logs and not os.path.isabs(m5.options.outdir):
print("Please specify the --outdir (output directory) of gem5"
" in the form of an absolute path")
print("An example: build/X86/gem5.opt --outdir /home/user/m5out/"
" configs-spec-tests/run_spec ...")
exit(1)
output_dir = os.path.join(m5.options.outdir, "speclogs")
# Get the DetailedCPU class from its name
detailed_cpu = getDetailedCPUModel(cpu_name)
    if detailed_cpu is None:
        print("'{}' is not defined in the config script.".format(cpu_name))
        print("Change getDetailedCPUModel() in run_spec.py "
              "to add more CPU Models.")
exit(1)
if not benchmark_size in ["ref", "train", "test"]:
print("Benchmark size must be one of the following: ref, train, test")
exit(1)
root, system = create_system(linux_kernel_path, disk_image_path,
detailed_cpu, mem_sys)
# Create and pass a script to the simulated system to run the required
# benchmark
system.readfile = writeBenchScript(m5.options.outdir, benchmark_name,
benchmark_size, output_dir)
# needed for long running jobs
if not allow_listeners:
m5.disableAllListeners()
# instantiate all of the objects we've created above
m5.instantiate()
# booting linux
success, exit_cause = boot_linux()
# reset stats
print("Reset stats")
m5.stats.reset()
# switch from KVM to detailed CPU
if not cpu_name == "kvm":
print("Switching to detailed CPU")
system.switchCpus(system.cpu, system.detailed_cpu)
print("Switching done")
# running benchmark
print("Benchmark: {}; Size: {}".format(benchmark_name, benchmark_size))
success, exit_cause = run_spec_benchmark()
# output the stats after the benchmark is complete
print("Output stats")
m5.stats.dump()
if not no_copy_logs:
# create the output folder
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# switch from detailed CPU to KVM
if not cpu_name == "kvm":
print("Switching to KVM")
system.switchCpus(system.detailed_cpu, system.cpu)
print("Switching done")
# copying logs
success, exit_cause = copy_spec_logs()
| 40.50495
| 152
| 0.638149
|
ba808e76debc5c887ce9cd7a6459a996d9fabca4
| 346
|
py
|
Python
|
lists_as_stacks_and_queues/exercise/04_fashion_boutique.py
|
Galchov/python-advanced
|
1961205415a634948f4a50cf6e60b630ee645ad5
|
[
"MIT"
] | null | null | null |
lists_as_stacks_and_queues/exercise/04_fashion_boutique.py
|
Galchov/python-advanced
|
1961205415a634948f4a50cf6e60b630ee645ad5
|
[
"MIT"
] | null | null | null |
lists_as_stacks_and_queues/exercise/04_fashion_boutique.py
|
Galchov/python-advanced
|
1961205415a634948f4a50cf6e60b630ee645ad5
|
[
"MIT"
] | null | null | null |
clothes = [int(i) for i in input().split()] # List of clothes' values
rack_capacity = int(input()) # The maximum capacity of one rack
racks = 1
sum_clothes = 0
while clothes:
value = clothes.pop()
sum_clothes += value
if sum_clothes > rack_capacity:
sum_clothes = value
racks += 1
print(racks)
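# Worked example (hypothetical input): values "2 4 3" with capacity 5 are taken from the
# right: 3 fits (sum 3), adding 4 overflows so a second rack starts with 4, adding 2
# overflows again so a third rack starts, and the program prints 3.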
| 23.066667
| 81
| 0.615607
|
e9e72bd4bc1cc77dbbd1ce4b2f7792b483950551
| 1,540
|
py
|
Python
|
models/utils/video_record.py
|
kolbytn/alfred
|
ef6fd8d61ecc59e1b4c805fce1b1c595af8e1049
|
[
"MIT"
] | null | null | null |
models/utils/video_record.py
|
kolbytn/alfred
|
ef6fd8d61ecc59e1b4c805fce1b1c595af8e1049
|
[
"MIT"
] | null | null | null |
models/utils/video_record.py
|
kolbytn/alfred
|
ef6fd8d61ecc59e1b4c805fce1b1c595af8e1049
|
[
"MIT"
] | null | null | null |
# video recorder helper class
from datetime import datetime
import cv2
import numpy as np
from PIL import Image  # Image.fromarray and np.uint8 are used in record_frame below
class VideoRecord:
def __init__(self, path, name, fps=5):
"""
param:
path: video save path (str)
name: video name (str)
fps: frames per second (int) (default=5)
example usage:
rec = VideoRecord('path/to/', 'filename', 10)
"""
self.path = path
self.name = name
self.fps = fps
self.frames = []
def record_frame(self, env_frame):
"""
records video frame in this object
param:
env_frame: a frame from thor environment (ThorEnv().last_event.frame)
example usage:
            env = ThorEnv()
            lastframe = env.last_event.frame
            rec.record_frame(lastframe)
"""
curr_image = Image.fromarray(np.uint8(env_frame))
img = cv2.cvtColor(np.asarray(curr_image), cv2.COLOR_RGB2BGR)
self.frames.append(img)
def savemp4(self):
"""
writes video to file at specified location, finalize video file
example usage:
rec.savemp4()
"""
if len(self.frames) == 0:
raise Exception("Can't write video file with no frames recorded")
height, width, layers = self.frames[0].shape
size = (width,height)
out = cv2.VideoWriter(f"{self.path}{self.name}.mp4", 0x7634706d, self.fps, size)
for i in range(len(self.frames)):
out.write(self.frames[i])
out.release()
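# Minimal self-contained smoke test (not part of the original module; the random frames
# below are a stand-in for ThorEnv().last_event.frame, which is an HxWx3 RGB array).
if __name__ == '__main__':
    rec = VideoRecord('', 'video_record_demo', fps=5)
    for _ in range(10):
        # any array convertible to uint8 RGB works as a frame
        rec.record_frame(np.random.randint(0, 255, (60, 80, 3)))
    rec.savemp4()  # writes ./video_record_demo.mp4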
| 35
| 88
| 0.567532
|
cd85d835253ee4ee43536503ba66f164f1a360f9
| 4,438
|
py
|
Python
|
psltdsim/plot/BAALtest.py
|
thadhaines/PSLTDSim
|
1bc598f3733c1369c164f54249e5f7757e6bf466
|
[
"MIT"
] | null | null | null |
psltdsim/plot/BAALtest.py
|
thadhaines/PSLTDSim
|
1bc598f3733c1369c164f54249e5f7757e6bf466
|
[
"MIT"
] | null | null | null |
psltdsim/plot/BAALtest.py
|
thadhaines/PSLTDSim
|
1bc598f3733c1369c164f54249e5f7757e6bf466
|
[
"MIT"
] | null | null | null |
def BAALtest(mirror, blkFlag=True, printFigs=False):
"""Meant to test adherence to BA ACE Limits."""
# simple visual test as of 12/30/19
import matplotlib.pyplot as plt
import numpy as np
import itertools
mir = mirror
xend = max(mir.r_t)
mini = 1 # can be increased to scale width of plots
caseName = mir.simParams['fileName'][:-1]
    mins = np.array(mir.r_t)/60.0
minEnd = max(mins)
#firstPlot = True
# Create single clock minute Average Frequency...
fig, ax = plt.subplots()
ax.plot(mins, np.array(mir.r_f)*mir.fBase , label='Frequency', c=[0,0,0],
linewidth=1.0)
## Create list of clock minute average frequency
ts = mir.simParams['timeStep']
Nsmp = int(60/ts) # number of samples per minute
aveF = np.zeros_like(mir.r_f)
sNDX = 0
    done = False
    sumF = 0.0  # initialise so preVal is defined on the first pass through the loop
while not done:
eNDX = sNDX + Nsmp
if eNDX > len(mir.r_f)-1:
eNDX = len(mir.r_f)-1
done = True
preVal = sumF/Nsmp
sumF = sum(mir.r_f[sNDX:eNDX])
aveF[sNDX:eNDX] = sumF/Nsmp
sNDX = eNDX
if done:
aveF[eNDX:] = preVal
ax.plot(mins, np.array(aveF)*mir.fBase, label="Minute Average",color=[1,0,1]
, alpha=0.66)
ax.set_title('Minute Average System Frequency \n Case: ' + caseName)
ax.set_xlim(0,minEnd)
ax.set_ylabel('Frequency [Hz]')
ax.set_xlabel('Time [minutes]')
ax.grid(True)
ax.legend(loc='right')
fig.set_dpi(150)
fig.set_size_inches(9/mini, 2.5)
fig.tight_layout()
plt.show(block=False)
if printFigs: plt.savefig(caseName+'MinFave.pdf', dpi=300)
plt.pause(0.00001) # required for true non-blocking print...
for BA in mir.BA:
        # Create separate figure for each BA
fig, ax = plt.subplots()
# calcs for BAAL
eps = 0.0228 # ave Hz
BAAL = []
fBase = mir.simParams['fBase']
for fa in aveF:
if fa < 1:
FTL = fBase-3*eps
else:
FTL = fBase+3*eps
#not the best way to do this
            # negative from B removed as this is typically a neg val
BAAL.append(10*BA.B*(FTL-fBase) * (FTL-fBase)/(fa*fBase-fBase))
ax.plot(mins,BA.r_RACE,
linewidth=1.0,
c=[0,0,0],
label ='RACE')
#ax.plot(mins,BAAL,
#c=[0,1,0],
#label = 'BAAL')
ax.fill_between(mins, 0, BAAL,
color=[0,1,0],
#alpha=0.75,
label = 'BAAL')
overBAAL = []
for (baal, race) in itertools.zip_longest(BAAL, BA.r_RACE):
if baal > 0 and race > baal:
# violation
overBAAL.append(1)
continue
if baal < 0 and race < baal:
# violation
overBAAL.append(-1)
continue
overBAAL.append(0)
#ax.plot(mins,np.array(overBAAL)*max(BAAL),
#c=[1,0,1],
#label = 'OverBAAL')
ax.fill_between(mins, 0, np.array(overBAAL)*max(np.abs(BAAL)),
color=[1,0,1],
alpha=0.666,
label = 'BAAL Exceeded')
#if firstPlot:
# Scale current axis.
#box = ax.get_position()
#boxW = box.width * 1.05
#firstPlot = False
#ax.set_position([box.x0, box.y0, boxW, box.height])
# Put a legend to the right of the current axis
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#print(overBAAL)# DEBUG
ax.legend(loc='right')
ax.set_title(r'Area '+str(BA.Area.Area)+' ('+ BA.name + ') BAAL \n Case: ' + caseName)
ax.set_xlim(0,minEnd)
        # rescale the y-axis when the BAAL band dwarfs RACE
if max(BAAL)/50 > max(BA.r_RACE):
ax.set_ylim(min(BA.r_RACE)*1.25, max(BA.r_RACE)*1.25)
ax.set_ylabel('ACE [MW]')
ax.set_xlabel('Time [minutes]')
#ax.legend(loc=0)
ax.grid(True)
fig.set_dpi(150)
fig.set_size_inches(9/mini, 2.5)
fig.tight_layout()
plt.show(block=False)
if printFigs: plt.savefig(caseName+BA.name+'BAAL.pdf', dpi=300)
plt.pause(0.00001) # required for true non-blocking print...
plt.show(block = blkFlag)
plt.pause(0.00001)
| 29.390728
| 94
| 0.527715
|
3b403aed18cdab661d9edfd823bf74e9033f3ce8
| 1,027
|
py
|
Python
|
river/preprocessing/__init__.py
|
deepcharles/river
|
046e7334d24fb5e5d127473d5c6988c75875f658
|
[
"BSD-3-Clause"
] | 1
|
2021-04-19T10:47:11.000Z
|
2021-04-19T10:47:11.000Z
|
river/preprocessing/__init__.py
|
henrygouk/river
|
744c87786d2bc87d7256f9c61b18dc10c9a2bb88
|
[
"BSD-3-Clause"
] | null | null | null |
river/preprocessing/__init__.py
|
henrygouk/river
|
744c87786d2bc87d7256f9c61b18dc10c9a2bb88
|
[
"BSD-3-Clause"
] | 1
|
2021-01-22T15:18:39.000Z
|
2021-01-22T15:18:39.000Z
|
"""Feature preprocessing.
The purpose of this module is to modify an existing set of features so that they can be processed
by a machine learning algorithm. This may be done by scaling numeric parts of the data or by
one-hot encoding categorical features. The difference with the `feature_extraction` module is that
the latter extracts new information from the data.
"""
from .feature_hasher import FeatureHasher
from .impute import PreviousImputer
from .impute import StatImputer
from .lda import LDA
from .one_hot import OneHotEncoder
from .scale import AdaptiveStandardScaler
from .scale import Binarizer
from .scale import MaxAbsScaler
from .scale import MinMaxScaler
from .scale import Normalizer
from .scale import RobustScaler
from .scale import StandardScaler
__all__ = [
"AdaptiveStandardScaler",
"Binarizer",
"FeatureHasher",
"LDA",
"MaxAbsScaler",
"MinMaxScaler",
"Normalizer",
"OneHotEncoder",
"PreviousImputer",
"RobustScaler",
"StandardScaler",
"StatImputer",
]
| 27.756757
| 98
| 0.767283
|
89469aa151aaa62d0d2f8fd553f158806c4a5636
| 1,723
|
py
|
Python
|
setup.py
|
nscherf/nighres
|
b4304a1ee7d878a0fa986f537b35ad9b65898bfe
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nscherf/nighres
|
b4304a1ee7d878a0fa986f537b35ad9b65898bfe
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nscherf/nighres
|
b4304a1ee7d878a0fa986f537b35ad9b65898bfe
|
[
"Apache-2.0"
] | 1
|
2019-01-21T10:53:38.000Z
|
2019-01-21T10:53:38.000Z
|
from os import path
from setuptools import setup, find_packages
# from setuptools.command.develop import develop
# from setuptools.command.install import install
# from subprocess import check_call
here = path.abspath(path.dirname(__file__))
build_script = path.join(here, "build.sh")
with open('README.rst') as f:
long_description = f.read()
# # These commands run the build.sh script during pip installation
# class PostDevelopCommand(develop):
# """Post-installation for development mode."""
# def run(self):
# develop.run(self)
# check_call(build_script)
#
#
# class PostInstallCommand(install):
# """Post-installation for installation mode."""
# def run(self):
# install.run(self)
# check_call(build_script)
setup(
name='nighres',
version='1.0.0b9',
description='Processing tools for high-resolution neuroimaging',
long_description=long_description,
url='https://nighres.readthedocs.io/',
author='Julia M Huntenburg, Pierre-Louis Bazin, Chris Steele',
author_email='ju.huntenburg@gmail.com',
license='Apache License, 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Scientific/Engineering',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
],
# cmdclass={
# 'develop': PostDevelopCommand,
# 'install': PostInstallCommand,
# },
keywords='MRI high-resolution laminar',
packages=find_packages(),
include_package_data=True,
install_requires=['numpy', 'nibabel'],
)
| 33.134615
| 70
| 0.644806
|
4c5cf03289f9ca75ffb8335159a5bd49a4cfadf5
| 11,017
|
py
|
Python
|
devito/core/cpu.py
|
cha-tzi/devito
|
45b14b7625a21631bd026a0c77911bc46dad9ead
|
[
"MIT"
] | null | null | null |
devito/core/cpu.py
|
cha-tzi/devito
|
45b14b7625a21631bd026a0c77911bc46dad9ead
|
[
"MIT"
] | 3
|
2020-11-30T06:23:14.000Z
|
2022-03-07T19:02:27.000Z
|
devito/core/cpu.py
|
cha-tzi/devito
|
45b14b7625a21631bd026a0c77911bc46dad9ead
|
[
"MIT"
] | null | null | null |
from functools import partial
from devito.core.operator import OperatorCore
from devito.exceptions import InvalidOperator
from devito.passes.clusters import (Blocking, Lift, cire, cse, eliminate_arrays,
extract_increments, factorize, fuse, optimize_pows)
from devito.passes.iet import (DataManager, Ompizer, avoid_denormals, mpiize,
optimize_halospots, hoist_prodders, relax_incr_dimensions)
from devito.tools import as_tuple, timed_pass
__all__ = ['CPU64NoopOperator', 'CPU64Operator', 'CPU64OpenMPOperator',
'Intel64Operator', 'Intel64OpenMPOperator', 'Intel64FSGOperator',
'Intel64FSGOpenMPOperator',
'PowerOperator', 'PowerOpenMPOperator',
'ArmOperator', 'ArmOpenMPOperator',
'CustomOperator']
class CPU64NoopOperator(OperatorCore):
BLOCK_LEVELS = 1
"""
Loop blocking depth. So, 1 => "blocks", 2 => "blocks" and "sub-blocks",
3 => "blocks", "sub-blocks", and "sub-sub-blocks", ...
"""
CIRE_REPEATS_INV = 1
"""
Number of CIRE passes to detect and optimize away Dimension-invariant expressions.
"""
CIRE_REPEATS_SOPS = 5
"""
Number of CIRE passes to detect and optimize away redundant sum-of-products.
"""
CIRE_MINCOST_INV = 50
"""
Minimum operation count of a Dimension-invariant aliasing expression to be
optimized away. Dimension-invariant aliases are lifted outside of one or more
invariant loop(s), so they require tensor temporaries that can be potentially
very large (e.g., the whole domain in the case of time-invariant aliases).
"""
CIRE_MINCOST_SOPS = 10
"""
Minimum operation count of a sum-of-product aliasing expression to be optimized away.
"""
@classmethod
def _normalize_kwargs(cls, **kwargs):
options = kwargs['options']
options['blocklevels'] = options['blocklevels'] or cls.BLOCK_LEVELS
options['cire-repeats'] = {
'invariants': options.pop('cire-repeats-inv') or cls.CIRE_REPEATS_INV,
'sops': options.pop('cire-repeats-sops') or cls.CIRE_REPEATS_SOPS
}
options['cire-mincost'] = {
'invariants': options.pop('cire-mincost-inv') or cls.CIRE_MINCOST_INV,
'sops': options.pop('cire-mincost-sops') or cls.CIRE_MINCOST_SOPS
}
return kwargs
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
sregistry = kwargs['sregistry']
# Distributed-memory parallelism
if options['mpi']:
mpiize(graph, mode=options['mpi'])
# Shared-memory parallelism
if options['openmp']:
ompizer = Ompizer(sregistry)
ompizer.make_parallel(graph)
# Symbol definitions
data_manager = DataManager(sregistry)
data_manager.place_definitions(graph)
data_manager.place_casts(graph)
return graph
class CPU64Operator(CPU64NoopOperator):
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
"""
Optimize Clusters for better runtime performance.
"""
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Toposort+Fusion (the former to expose more fusion opportunities)
clusters = fuse(clusters, toposort=True)
# Hoist and optimize Dimension-invariant sub-expressions
clusters = cire(clusters, 'invariants', sregistry, options, platform)
clusters = Lift().process(clusters)
# Blocking to improve data locality
clusters = Blocking(options).process(clusters)
# Reduce flops (potential arithmetic alterations)
clusters = extract_increments(clusters, sregistry)
clusters = cire(clusters, 'sops', sregistry, options, platform)
clusters = factorize(clusters)
clusters = optimize_pows(clusters)
# Reduce flops (no arithmetic alterations)
clusters = cse(clusters, sregistry)
# The previous passes may have created fusion opportunities, which in
# turn may enable further optimizations
clusters = fuse(clusters)
clusters = eliminate_arrays(clusters)
return clusters
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Flush denormal numbers
avoid_denormals(graph)
# Distributed-memory parallelism
optimize_halospots(graph)
if options['mpi']:
mpiize(graph, mode=options['mpi'])
# Lower IncrDimensions so that blocks of arbitrary shape may be used
relax_incr_dimensions(graph, sregistry=sregistry)
# SIMD-level parallelism
ompizer = Ompizer(sregistry)
ompizer.make_simd(graph, simd_reg_size=platform.simd_reg_size)
# Misc optimizations
hoist_prodders(graph)
# Symbol definitions
data_manager = DataManager(sregistry)
data_manager.place_definitions(graph)
data_manager.place_casts(graph)
return graph
class CPU64OpenMPOperator(CPU64Operator):
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Flush denormal numbers
avoid_denormals(graph)
# Distributed-memory parallelism
optimize_halospots(graph)
if options['mpi']:
mpiize(graph, mode=options['mpi'])
# Lower IncrDimensions so that blocks of arbitrary shape may be used
relax_incr_dimensions(graph, sregistry=sregistry)
# SIMD-level parallelism
ompizer = Ompizer(sregistry)
ompizer.make_simd(graph, simd_reg_size=platform.simd_reg_size)
# Shared-memory parallelism
ompizer.make_parallel(graph)
# Misc optimizations
hoist_prodders(graph)
# Symbol definitions
data_manager = DataManager(sregistry)
data_manager.place_definitions(graph)
data_manager.place_casts(graph)
return graph
Intel64Operator = CPU64Operator
Intel64OpenMPOperator = CPU64OpenMPOperator
class Intel64FSGOperator(Intel64Operator):
"""
Operator with performance optimizations tailored "For Small Grids" (FSG).
"""
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Toposort+Fusion (the former to expose more fusion opportunities)
clusters = fuse(clusters, toposort=True)
# Hoist and optimize Dimension-invariant sub-expressions
clusters = cire(clusters, 'invariants', sregistry, options, platform)
clusters = Lift().process(clusters)
# Reduce flops (potential arithmetic alterations)
clusters = extract_increments(clusters, sregistry)
clusters = cire(clusters, 'sops', sregistry, options, platform)
clusters = factorize(clusters)
clusters = optimize_pows(clusters)
# Reduce flops (no arithmetic alterations)
clusters = cse(clusters, sregistry)
# The previous passes may have created fusion opportunities, which in
# turn may enable further optimizations
clusters = fuse(clusters)
clusters = eliminate_arrays(clusters)
# Blocking to improve data locality
clusters = Blocking(options).process(clusters)
return clusters
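    # Editorial note (not from the original source): compared with
    # CPU64Operator._specialize_clusters, the pass list is identical except that
    # loop blocking is applied *after* the flop-reducing passes, which is the
    # "For Small Grids" tuning this class is named for.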
class Intel64FSGOpenMPOperator(Intel64FSGOperator, CPU64OpenMPOperator):
_specialize_iet = CPU64OpenMPOperator._specialize_iet
PowerOperator = CPU64Operator
PowerOpenMPOperator = CPU64OpenMPOperator
ArmOperator = CPU64Operator
ArmOpenMPOperator = CPU64OpenMPOperator
class CustomOperator(CPU64Operator):
_known_passes = ('blocking', 'denormals', 'optcomms', 'openmp', 'mpi',
'simd', 'prodders', 'topofuse', 'fuse')
@classmethod
def _make_clusters_passes_mapper(cls, **kwargs):
options = kwargs['options']
return {
'blocking': Blocking(options).process,
'fuse': fuse,
'topofuse': lambda i: fuse(i, toposort=True)
}
@classmethod
def _make_iet_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
ompizer = Ompizer(sregistry)
return {
'denormals': avoid_denormals,
'optcomms': optimize_halospots,
'blocking': partial(relax_incr_dimensions, sregistry=sregistry),
'openmp': ompizer.make_parallel,
'mpi': partial(mpiize, mode=options['mpi']),
'simd': partial(ompizer.make_simd, simd_reg_size=platform.simd_reg_size),
'prodders': hoist_prodders
}
@classmethod
def _build(cls, expressions, **kwargs):
# Sanity check
passes = as_tuple(kwargs['mode'])
if any(i not in cls._known_passes for i in passes):
raise InvalidOperator("Unknown passes `%s`" % str(passes))
return super(CustomOperator, cls)._build(expressions, **kwargs)
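    # Hypothetical usage sketch (editorial comment, not from the original file):
    # a custom pass list such as ('blocking', 'simd', 'openmp') supplied through
    # the operator's `mode` front-end argument is validated here and then applied,
    # in the order given, by the mappers defined above; the exact front-end syntax
    # for selecting CustomOperator depends on the Devito version in use.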
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
passes = as_tuple(kwargs['mode'])
# Fetch passes to be called
passes_mapper = cls._make_clusters_passes_mapper(**kwargs)
# Call passes
for i in passes:
try:
clusters = passes_mapper[i](clusters)
except KeyError:
pass
return clusters
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
sregistry = kwargs['sregistry']
passes = as_tuple(kwargs['mode'])
# Fetch passes to be called
passes_mapper = cls._make_iet_passes_mapper(**kwargs)
# Call passes
for i in passes:
try:
passes_mapper[i](graph)
except KeyError:
pass
# Force-call `mpi` if requested via global option
if 'mpi' not in passes and options['mpi']:
passes_mapper['mpi'](graph)
# Force-call `openmp` if requested via global option
if 'openmp' not in passes and options['openmp']:
passes_mapper['openmp'](graph)
# Symbol definitions
data_manager = DataManager(sregistry)
data_manager.place_definitions(graph)
data_manager.place_casts(graph)
return graph
| 32.026163
| 89
| 0.650177
|
13cdaa3738fef4f6828f0d535ebc92efae7979e0
| 1,054
|
py
|
Python
|
CodingInterview2/32_01_PrintTreeFromTopToBottom/print_tree_from_top_to_bottom.py
|
hscspring/TheAlgorithms-Python
|
5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f
|
[
"MIT"
] | 10
|
2020-07-06T11:00:58.000Z
|
2022-01-29T09:25:24.000Z
|
CodingInterview2/32_01_PrintTreeFromTopToBottom/print_tree_from_top_to_bottom.py
|
hscspring/TheAlgorithms-Python
|
5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f
|
[
"MIT"
] | null | null | null |
CodingInterview2/32_01_PrintTreeFromTopToBottom/print_tree_from_top_to_bottom.py
|
hscspring/TheAlgorithms-Python
|
5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f
|
[
"MIT"
] | 3
|
2020-07-13T06:39:23.000Z
|
2020-08-15T16:29:48.000Z
|
"""
Interview Question 32 (1): Print a binary tree from top to bottom, without splitting by level.
Problem: Print every node of a binary tree from top to bottom; nodes on the same
level are printed from left to right.
"""
class BinaryTreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def connect_binarytree_nodes(parent: BinaryTreeNode,
left: BinaryTreeNode,
right: BinaryTreeNode) -> BinaryTreeNode:
if parent:
parent.left = left
parent.right = right
return parent
def print_binary_tree(tree: BinaryTreeNode) -> list:
"""
Print tree from top to bottom.

    Parameters
    ----------
    tree : BinaryTreeNode
        Root of the tree to print (may be None).

    Returns
    -------
    out : list
        Node values in level-order (breadth-first) sequence.

    Notes
    -----
    Uses a FIFO queue for a breadth-first traversal.
    """
if not tree:
return []
queue = [tree]
res = []
    while queue:
node = queue.pop(0)
res.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return res
if __name__ == '__main__':
pass
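    # Minimal usage sketch (editorial addition, not part of the original file):
    # build a three-node tree and print it in level order; expected output: [1, 2, 3].
    root = connect_binarytree_nodes(BinaryTreeNode(1),
                                    BinaryTreeNode(2),
                                    BinaryTreeNode(3))
    print(print_binary_tree(root))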
| 16.215385
| 70
| 0.536053
|
e5fcf0cc20bab3d38e418826528cdbca7c36fb2f
| 367
|
py
|
Python
|
users/migrations/0005_rename_discription_user_bio.py
|
alexdeathway/Gecom
|
2a0fc87887d73d15eba183625dc8a429defe851f
|
[
"MIT"
] | 7
|
2021-11-15T06:28:05.000Z
|
2022-02-22T11:36:00.000Z
|
users/migrations/0005_rename_discription_user_bio.py
|
alexdeathway/Gecom
|
2a0fc87887d73d15eba183625dc8a429defe851f
|
[
"MIT"
] | 3
|
2021-11-02T16:10:49.000Z
|
2022-02-01T08:30:38.000Z
|
users/migrations/0005_rename_discription_user_bio.py
|
alexdeathway/Gecom
|
2a0fc87887d73d15eba183625dc8a429defe851f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2022-01-22 07:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0004_alter_user_profile_image'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='discription',
new_name='bio',
),
]
| 19.315789
| 51
| 0.588556
|
dac8abd6d1da6fd9a95ab57baed7d37de4c05158
| 10,744
|
py
|
Python
|
public/yum-3.2.28/test/packagetests.py
|
chillaxor/blogbin
|
211202d513fa80a3d22fb3963f36a01a8dec5b68
|
[
"MIT"
] | null | null | null |
public/yum-3.2.28/test/packagetests.py
|
chillaxor/blogbin
|
211202d513fa80a3d22fb3963f36a01a8dec5b68
|
[
"MIT"
] | 5
|
2021-02-02T08:17:10.000Z
|
2022-02-27T06:53:42.000Z
|
public/yum-3.2.28/test/packagetests.py
|
chillaxor/blogbin
|
211202d513fa80a3d22fb3963f36a01a8dec5b68
|
[
"MIT"
] | null | null | null |
import unittest
import settestpath
from yum import packages
from rpmUtils import miscutils
class InPrcoRangePackageTests(unittest.TestCase):
def setUp(self):
self.po = packages.RpmBase()
self.po.rel = 10
self.po.prco['provides'].append(("seth", "EQ", (1, 2, 3)))
self.po.prco['requires'].append(("foo", "GE", (4, 5, None)))
def testRequiresEqPass(self):
dep = ("foo", "EQ", (4, 5, 0))
self.assertTrue(self.po.inPrcoRange('requires', dep))
def testRequiresEqFailGt(self):
dep = ("foo", "EQ", (4, 4, 0))
self.assertFalse(self.po.inPrcoRange('requires', dep))
def testProvidesGePass(self):
dep = ("seth", "GE", (1, 0, 0))
self.assertTrue(self.po.inPrcoRange('provides', dep))
def testProvidesGePassWithEqual(self):
dep = ("seth", "GE", (1, 2, 3))
self.assertTrue(self.po.inPrcoRange('provides', dep))
def testProvidesGeFailOnEpoch(self):
dep = ("seth", "GE", (2, 0, 0))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesGeFailOnVersion(self):
dep = ("seth", "GE", (1, 3, 0))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesGeFailOnRelease(self):
dep = ("seth", "GE", (1, 2, 4))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesGtPass(self):
dep = ("seth", "GT", (1, 0, 0))
self.assertTrue(self.po.inPrcoRange('provides', dep))
def testProvidesGtFail(self):
dep = ("seth", "GT", (1, 2, 4))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesGtFailOnEqual(self):
dep = ("seth", "GT", (1, 2, 3))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesEqPass(self):
dep = ("seth", "EQ", (1, 2, 3))
self.assertTrue(self.po.inPrcoRange('provides', dep))
def testProvidesEqFailGt(self):
dep = ("seth", "EQ", (1, 2, 0))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesEqFailLt(self):
dep = ("seth", "EQ", (1, 2, 4))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesLePassEq(self):
dep = ("seth", "LE", (1, 2, 3))
self.assertTrue(self.po.inPrcoRange('provides', dep))
def testProvidesLePassGt(self):
dep = ("seth", "LE", (1, 5, 2))
self.assertTrue(self.po.inPrcoRange('provides', dep))
def testProvidesLeFail(self):
dep = ("seth", "LE", (0, 2, 2))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesLtPass(self):
dep = ("seth", "LT", (1, 2, 6))
self.assertTrue(self.po.inPrcoRange('provides', dep))
def testProvidesLtFailEq(self):
dep = ("seth", "LT", (1, 2, 3))
self.assertFalse(self.po.inPrcoRange('provides', dep))
def testProvidesLtFailGt(self):
dep = ("seth", "LT", (1, 0, 2))
self.assertFalse(self.po.inPrcoRange('provides', dep))
class PackageEvrTests(unittest.TestCase):
def setUp(self):
self.evr = packages.PackageEVR(0, 1, 2)
def testLtPass(self):
other_evr = packages.PackageEVR(0, 1, 5)
self.assertTrue(self.evr < other_evr)
def testLtFailEq(self):
other_evr = packages.PackageEVR(0, 1, 2)
self.assertFalse(self.evr < other_evr)
def testLtFailGt(self):
other_evr = packages.PackageEVR(0, 0, 2)
self.assertFalse(self.evr < other_evr)
def testLePassLt(self):
other_evr = packages.PackageEVR(0, 1, 5)
self.assertTrue(self.evr <= other_evr)
def testLePassEq(self):
other_evr = packages.PackageEVR(0, 1, 2)
self.assertTrue(self.evr <= other_evr)
def testLeFailGt(self):
other_evr = packages.PackageEVR(0, 0, 2)
self.assertFalse(self.evr <= other_evr)
def testGtPass(self):
other_evr = packages.PackageEVR(0, 1, 0)
self.assertTrue(self.evr > other_evr)
def testGtFailEq(self):
other_evr = packages.PackageEVR(0, 1, 2)
self.assertFalse(self.evr > other_evr)
def testGtFailLt(self):
other_evr = packages.PackageEVR(0, 2, 2)
self.assertFalse(self.evr > other_evr)
def testGePassGt(self):
other_evr = packages.PackageEVR(0, 1, 0)
self.assertTrue(self.evr >= other_evr)
def testGePassEq(self):
other_evr = packages.PackageEVR(0, 1, 2)
self.assertTrue(self.evr >= other_evr)
def testGeFailLt(self):
other_evr = packages.PackageEVR(2, 1, 2)
self.assertFalse(self.evr >= other_evr)
def testEqPass(self):
other_evr = packages.PackageEVR(0, 1, 2)
self.assertTrue(self.evr == other_evr)
def testEqFailGt(self):
other_evr = packages.PackageEVR(0, 1, 0)
self.assertFalse(self.evr == other_evr)
def testEqFailLt(self):
other_evr = packages.PackageEVR(0, 4, 2)
self.assertFalse(self.evr == other_evr)
class StubPkg(object):
def __init__(self, n, a, e, v, r):
self.pkgtup = (n, a, e, v, r)
class BuildPackageDictRefTests(unittest.TestCase):
def testNoPkg(self):
pkgs = []
self.assertEquals({}, packages.buildPkgRefDict(pkgs))
def testOnePkg(self):
pkg = StubPkg("yum", "noarch", 0, "3.1.1", 2)
pkgs = [pkg]
pkg_dict = packages.buildPkgRefDict(pkgs)
self.assertEquals(7, len(pkg_dict))
unseen_keys = ['yum', 'yum.noarch', 'yum-3.1.1-2.noarch', 'yum-3.1.1',
'yum-3.1.1-2', '0:yum-3.1.1-2.noarch', 'yum-0:3.1.1-2.noarch']
for key in pkg_dict.keys():
self.assertTrue(key in unseen_keys)
unseen_keys.remove(key)
self.assertEquals(1, len(pkg_dict[key]))
self.assertEquals(pkg, pkg_dict[key][0])
self.assertEquals(0, len(unseen_keys))
def _perms(evr):  # Yield all 8 (epoch, version, release) tuples, each component optionally masked to None
e, v, r = evr
for num in range(8):
perm = []
if num & 1:
perm.append(e)
else:
perm.append(None)
if num & 2:
perm.append(v)
else:
perm.append(None)
if num & 4:
perm.append(r)
else:
perm.append(None)
yield tuple(perm)
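# For example (editorial note, not part of the original tests), _perms(('1', '2', '3'))
# yields, in order: (None, None, None), ('1', None, None), (None, '2', None),
# ('1', '2', None), (None, None, '3'), ('1', None, '3'), (None, '2', '3'), ('1', '2', '3').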
class RangeCompareTests(unittest.TestCase):
def testRangeCompare(self):
def tst(requires, provides, result):
print requires, provides
self.assertEquals(miscutils.rangeCompare(requires, provides),result)
def tst_lege_prov(requires, provides, result):
if not result or provides[1] != 'EQ':
return
for flag in ('GE', 'LE'): # EQ is a subset of either LE or GE
nprovides = (provides[0], flag, provides[2])
tst(requires, nprovides, result)
def tst_lege_reqs(requires, provides, result):
tst_lege_prov(requires, provides, result)
if not result or requires[1] != 'EQ':
return
for flag in ('GE', 'LE'): # EQ is a subset of either LE or GE
nrequires = (requires[0], flag, requires[2])
tst(nrequires, provides, result)
tst_lege_prov(nrequires, provides, result)
def tst_none_reqs(requires, provides, result):
if (not result or requires[1] or provides[1] != 'EQ' or
requires[2] != (None, None, None)):
return
tst_lege_prov(requires, provides, result)
# Doesn't matter about versions
for flag in ('GE', 'EQ', 'LE'):
nrequires = (requires[0], flag, requires[2])
tst(nrequires, provides, result)
tst_lege_prov(nrequires, provides, result)
def tst_none_expand(requires, provides, result, *args):
if requires[2] != (None, None, None):
return
# Expand parts of the version, replacing with data from provides.
# Eg. (None, None, None) and ('1', '2', '3') becomes:
# (None, None, None)
# ('1', None, None)
# (None, '2', None)
# (None, None, '3')
# ('1', '2', None)
# ...
# ('1', '2', '3')
for evr in _perms(provides[2]):
nrequires = (requires[0], requires[1], evr)
for func in args:
func(nrequires, provides, result)
for requires, provides, result in (
(('foo', 'EQ', ('0', '1.4.4', '0')), ('foo', 'EQ', ('0', '1.4.4', '0')), 1),
(('foo', 'EQ', ('0', '1.4.4', '0')), ('foo', 'EQ', (None, '1.4.4', '0')), 1),
(('foo', 'EQ', ('0', '1.4.4', '0')), ('foo', 'EQ', ('0', '1.4.4', None)), 1),
(('foo', 'EQ', ('0', '1.4.4', None)), ('foo', 'EQ', ('0', '1.4.4', '8')), 1),
(('foo', 'LT', ('0', '1.5.4', None)), ('foo', 'EQ', ('0', '1.4.4', '7')), 1),
(('foo', 'GE', ('0', '1.4.4', '7.1')), ('foo', 'EQ', ('0', '1.4.4', '7')), 0),
(('foo', 'EQ', ('0', '1.4', None)), ('foo', 'EQ', ('0', '1.4.4', '7')), 0),
(('foo', 'GT', ('1', '1.4.4', None)), ('foo', 'EQ', ('3', '1.2.4', '7')), 1),
(('foo', None, (None, None, None)), ('foo', 'EQ', ('3', '1.2.4', '7')), 1),
(('fuu', None, (None, None, None)), ('foo', 'EQ', ('3', '1.2.4', '7')), 0),
(('foo', None, (None, None, None)), ('foo', 'GT', ('3', '1.2.4', '7')), 1),
(('foo', 'EQ', (None, None, None)), ('foo', 'GT', ('3', '1.2.4', '7')), 0),
(('foo', 'LT', (None, None, None)), ('foo', 'GT', ('3', '1.2.4', '7')), 0),
(('foo', 'LE', (None, None, None)), ('foo', 'GT', ('3', '1.2.4', '7')), 0),
(('foo', 'GE', (None, None, None)), ('foo', 'GT', ('3', '1.2.4', '7')), 1),
(('foo', 'GT', (None, None, None)), ('foo', 'GT', ('3', '1.2.4', '7')), 1),
(('foo', 'EQ', (None, None, None)), ('foo', 'LT', ('3', '1.2.4', '7')), 0),
(('foo', 'LT', (None, None, None)), ('foo', 'LT', ('3', '1.2.4', '7')), 1),
(('foo', 'LE', (None, None, None)), ('foo', 'LT', ('3', '1.2.4', '7')), 1),
(('foo', 'GE', (None, None, None)), ('foo', 'LT', ('3', '1.2.4', '7')), 0),
(('foo', 'GT', (None, None, None)), ('foo', 'LT', ('3', '1.2.4', '7')), 0),
):
tst(requires, provides, result)
tst_lege_reqs(requires, provides, result)
tst_none_expand(requires, provides, result,
tst, tst_lege_reqs, tst_none_reqs)
| 37.830986
| 91
| 0.520197
|
3f277d682edf17047366602c49855b77e9c27ab2
| 4,052
|
py
|
Python
|
evaluation/2017/evaluation_method.py
|
Limingxing00/RDE-VOS-CVPR2022
|
548a889395e2370c07962692e01e3abf8f518153
|
[
"MIT"
] | null | null | null |
evaluation/2017/evaluation_method.py
|
Limingxing00/RDE-VOS-CVPR2022
|
548a889395e2370c07962692e01e3abf8f518153
|
[
"MIT"
] | null | null | null |
evaluation/2017/evaluation_method.py
|
Limingxing00/RDE-VOS-CVPR2022
|
548a889395e2370c07962692e01e3abf8f518153
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from time import time
import argparse
import numpy as np
import pandas as pd
from davis2017.evaluation import DAVISEvaluation
# from OSS import save_csv
"""
#### 2017 val #####
python evaluation_method.py \
--results_path \
mingxing/gdata1/mask_propgagation/Stage_2_bs_24_lr_3e-5_from_static_0000_200000_50000
#### 2017 test-dev #####
python evaluation_STCN.py --results_path mingxing/gdata2/STCN/davis2017-test/s3-perturb-group-v3-klloss_weight10 --set test-dev \
--davis_path /config/data/limx/DAVIS2017-480P/DAVIS2017-480p-ori/test-dev
"""
default_davis_path = '../DAVIS2017-480P/DAVIS2017-480p-ori/trainval'
time_start = time()
parser = argparse.ArgumentParser()
parser.add_argument('--davis_path', type=str, help='Path to the DAVIS folder containing the JPEGImages, Annotations, '
'ImageSets, Annotations_unsupervised folders',
required=False, default=default_davis_path)
parser.add_argument('--set', type=str, help='Subset to evaluate the results', default='val')
parser.add_argument('--task', type=str, help='Task to evaluate the results', default='semi-supervised',
choices=['semi-supervised', 'unsupervised'])
parser.add_argument('--results_path', type=str, help='Path to the folder containing the sequences folders',
required=True, default='')
args, _ = parser.parse_known_args()
csv_name_global = f'global_results-{args.set}.csv'
csv_name_per_sequence = f'per-sequence_results-{args.set}.csv'
# Check whether the method has already been evaluated; if so, read the cached results, otherwise compute them
csv_name_global_path = os.path.join(args.results_path, csv_name_global)
csv_name_per_sequence_path = os.path.join(args.results_path, csv_name_per_sequence)
if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path):
print('Using precomputed results...')
table_g = pd.read_csv(csv_name_global_path)
table_seq = pd.read_csv(csv_name_per_sequence_path)
else:
print(f'Evaluating sequences for the {args.task} task...')
# Create dataset and evaluate
dataset_eval = DAVISEvaluation(davis_root=args.davis_path, task=args.task, gt_set=args.set)
metrics_res = dataset_eval.evaluate(args.results_path)
J, F = metrics_res['J'], metrics_res['F']
# Generate dataframe for the general results
g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
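    # J&F-Mean is the standard DAVIS summary score: the arithmetic mean of the
    # region-similarity (J) and boundary-accuracy (F) means computed below.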
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]),
np.mean(F["D"])])
g_res = np.reshape(g_res, [1, len(g_res)])
table_g = pd.DataFrame(data=g_res, columns=g_measures)
with open(csv_name_global_path, 'w') as f:
table_g.to_csv(f, index=False, float_format="%.3f")
# save_csv(csv_name_global_path, table_g)
print(f'Global results saved in {csv_name_global_path}')
# Generate a dataframe for the per sequence results
seq_names = list(J['M_per_object'].keys())
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures)
with open(csv_name_per_sequence_path, 'w') as f:
table_seq.to_csv(f, index=False, float_format="%.3f")
# save_csv(csv_name_per_sequence_path, table_seq)
print(f'Per-sequence results saved in {csv_name_per_sequence_path}')
# Print the results
sys.stdout.write(f"--------------------------- Global results for {args.set} ---------------------------\n")
print(table_g.to_string(index=False))
sys.stdout.write(f"\n---------- Per sequence results for {args.set} ----------\n")
print(table_seq.to_string(index=False))
total_time = time() - time_start
sys.stdout.write('\nTotal time:' + str(total_time))
| 48.238095
| 129
| 0.698667
|
8ae01198ff39739ac60a5a1a0abc36a82d3c4d12
| 938
|
py
|
Python
|
agile_analytics/__init__.py
|
cmheisel/jira-agile-extractor
|
8c5445234c0c950ef30763b787e4bbacfe70ec2a
|
[
"MIT"
] | 14
|
2016-08-03T13:38:59.000Z
|
2021-10-07T01:50:12.000Z
|
agile_analytics/__init__.py
|
cmheisel/agile-analytics
|
8c5445234c0c950ef30763b787e4bbacfe70ec2a
|
[
"MIT"
] | 42
|
2016-07-22T19:32:39.000Z
|
2021-04-30T20:32:06.000Z
|
agile_analytics/__init__.py
|
cmheisel/jira-agile-extractor
|
8c5445234c0c950ef30763b787e4bbacfe70ec2a
|
[
"MIT"
] | 1
|
2019-08-02T05:30:38.000Z
|
2019-08-02T05:30:38.000Z
|
"""Pulls data from agile systems and analyzes it."""
from .version import __version__, __author__
from .fetchers import (
JIRAFetcher,
convert_jira_issue
)
from .analyzers import (
DateAnalyzer,
PartialDateAnalyzer,
)
from .reporters import (
ThroughputReporter,
LeadTimeDistributionReporter,
TicketReporter,
LeadTimePercentileReporter,
CycleTimePercentileReporter,
SLAReporter,
CreatedReporter,
)
from .writers import (
CSVWriter,
GSheetWriter
)
version = ".".join(map(str, __version__))
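# For example, assuming __version__ is a tuple like (1, 2, 3), version == "1.2.3".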
__all__ = [
"version",
"__version__",
"__author__",
"JIRAFetcher",
"convert_jira_issue",
"DateAnalyzer",
"ThroughputReporter",
"LeadTimeDistributionReporter",
"TicketReporter",
"CSVWriter",
"GSheetWriter",
"LeadTimePercentileReporter",
"SLAReporter",
"PartialDateAnalyzer",
"CreatedReporter",
"CycleTimePercentileReporter"
]
| 18.392157
| 52
| 0.695096
|
73c99306bd08803c0fec95f6c4ba27c22ea3cc2d
| 30,561
|
py
|
Python
|
fpga/app/template/tb/mqnic_core_pcie_us/test_mqnic_core_pcie_us.py
|
missinglinkelectronics/corundum
|
1797fdececb6a5fe866b649345187e05d3fb2a30
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2022-01-24T04:54:00.000Z
|
2022-01-24T04:54:00.000Z
|
fpga/app/template/tb/mqnic_core_pcie_us/test_mqnic_core_pcie_us.py
|
linjw16/corundum
|
65ad32421bacc497823ca939b0b9f0801063c4ea
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
fpga/app/template/tb/mqnic_core_pcie_us/test_mqnic_core_pcie_us.py
|
linjw16/corundum
|
65ad32421bacc497823ca939b0b9f0801063c4ea
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2022-03-01T13:36:39.000Z
|
2022-03-01T13:36:39.000Z
|
"""
Copyright 2021, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
"""
import logging
import os
import sys
import scapy.utils
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.axi import AxiStreamBus
from cocotbext.eth import EthMac
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
try:
import mqnic
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
import mqnic
finally:
del sys.path[0]
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.rc.max_payload_size = 0x1 # 256 bytes
self.rc.max_read_request_size = 0x2 # 512 bytes
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
# pcie_link_width=16,
user_clk_frequency=250e6,
alignment="dword",
cq_cc_straddle=False,
rq_rc_straddle=False,
rc_4tlp_straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk,
user_reset=dut.rst,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0
# pcie_rq_tag1
# pcie_rq_tag_av
# pcie_rq_tag_vld0
# pcie_rq_tag_vld1
# Requester Completion Interface
rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),
# Completer reQuest Interface
cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
# cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
# cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.driver = mqnic.Driver()
self.dev.functions[0].msi_cap.msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**len(dut.core_pcie_inst.axil_ctrl_araddr), ext=True, prefetch=True)
if hasattr(dut.core_pcie_inst, 'pcie_app_ctrl'):
self.dev.functions[0].configure_bar(2, 2**len(dut.core_pcie_inst.axil_app_ctrl_araddr), ext=True, prefetch=True)
# Ethernet
self.port_mac = []
eth_int_if_width = len(dut.core_pcie_inst.core_inst.iface[0].port[0].rx_async_fifo_inst.m_axis_tdata)
eth_clock_period = 6.4
eth_speed = 10e9
if eth_int_if_width == 64:
# 10G
eth_clock_period = 6.4
eth_speed = 10e9
elif eth_int_if_width == 128:
# 25G
eth_clock_period = 2.56
eth_speed = 25e9
elif eth_int_if_width == 512:
# 100G
eth_clock_period = 3.102
eth_speed = 100e9
for iface in dut.core_pcie_inst.core_inst.iface:
for port in iface.port:
cocotb.start_soon(Clock(port.port_rx_clk, eth_clock_period, units="ns").start())
cocotb.start_soon(Clock(port.port_tx_clk, eth_clock_period, units="ns").start())
port.port_rx_rst.setimmediatevalue(0)
port.port_tx_rst.setimmediatevalue(0)
mac = EthMac(
tx_clk=port.port_tx_clk,
tx_rst=port.port_tx_rst,
tx_bus=AxiStreamBus.from_prefix(port, "axis_tx"),
tx_ptp_time=port.ptp.tx_ptp_cdc_inst.output_ts,
tx_ptp_ts=port.ptp.axis_tx_ptp_ts,
tx_ptp_ts_tag=port.ptp.axis_tx_ptp_ts_tag,
tx_ptp_ts_valid=port.ptp.axis_tx_ptp_ts_valid,
rx_clk=port.port_rx_clk,
rx_rst=port.port_rx_rst,
rx_bus=AxiStreamBus.from_prefix(port, "axis_rx"),
rx_ptp_time=port.ptp.rx_ptp_cdc_inst.output_ts,
ifg=12, speed=eth_speed
)
self.port_mac.append(mac)
dut.ctrl_reg_wr_wait.setimmediatevalue(0)
dut.ctrl_reg_wr_ack.setimmediatevalue(0)
dut.ctrl_reg_rd_data.setimmediatevalue(0)
dut.ctrl_reg_rd_wait.setimmediatevalue(0)
dut.ctrl_reg_rd_ack.setimmediatevalue(0)
dut.ptp_sample_clk.setimmediatevalue(0)
dut.s_axis_stat_tdata.setimmediatevalue(0)
dut.s_axis_stat_tid.setimmediatevalue(0)
dut.s_axis_stat_tvalid.setimmediatevalue(0)
self.loopback_enable = False
cocotb.start_soon(self._run_loopback())
async def init(self):
for mac in self.port_mac:
mac.rx.reset.setimmediatevalue(0)
mac.tx.reset.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
for mac in self.port_mac:
mac.rx.reset.setimmediatevalue(1)
mac.tx.reset.setimmediatevalue(1)
await FallingEdge(self.dut.rst)
await Timer(100, 'ns')
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
for mac in self.port_mac:
mac.rx.reset.setimmediatevalue(0)
mac.tx.reset.setimmediatevalue(0)
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
async def _run_loopback(self):
while True:
await RisingEdge(self.dut.clk)
if self.loopback_enable:
for mac in self.port_mac:
if not mac.tx.empty():
await mac.rx.send(await mac.tx.recv())
@cocotb.test()
async def run_test_nic(dut):
tb = TB(dut)
await tb.init()
tb.log.info("Init driver")
await tb.driver.init_pcie_dev(tb.rc, tb.dev.functions[0].pcie_id)
for interface in tb.driver.interfaces:
await interface.open()
# enable queues
tb.log.info("Enable queues")
for interface in tb.driver.interfaces:
await interface.sched_blocks[0].schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(interface.tx_queue_count):
await interface.sched_blocks[0].schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
# wait for all writes to complete
await tb.driver.hw_regs.read_dword(0)
tb.log.info("Init complete")
tb.log.info("Send and receive single packet")
for interface in tb.driver.interfaces:
data = bytearray([x % 256 for x in range(1024)])
await interface.start_xmit(data, 0)
pkt = await tb.port_mac[interface.index*interface.port_count].tx.recv()
tb.log.info("Packet: %s", pkt)
await tb.port_mac[interface.index*interface.port_count].rx.send(pkt)
pkt = await interface.recv()
tb.log.info("Packet: %s", pkt)
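        # Editorial comment (not from the original test): the ~...& 0xffff undoes
        # scapy's Internet-checksum complement, recovering the raw 16-bit
        # ones'-complement sum over everything after the 14-byte Ethernet header,
        # which is what the RX checksum offload reports.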
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.log.info("RX and TX checksum tests")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5')
ip = IP(src='192.168.1.100', dst='192.168.1.101')
udp = UDP(sport=1, dport=2)
test_pkt = eth / ip / udp / payload
test_pkt2 = test_pkt.copy()
test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP]))
await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6)
pkt = await tb.port_mac[0].tx.recv()
tb.log.info("Packet: %s", pkt)
await tb.port_mac[0].rx.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
assert Ether(pkt.data).build() == test_pkt.build()
tb.log.info("Multiple small packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Multiple large packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Jumbo frames")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(9014)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
if len(tb.driver.interfaces) > 1:
tb.log.info("All interfaces")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for k, p in enumerate(pkts):
await tb.driver.interfaces[k % len(tb.driver.interfaces)].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[k % len(tb.driver.interfaces)].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
if len(tb.driver.interfaces[0].sched_blocks) > 1:
tb.log.info("All interface 0 scheduler blocks")
for block in tb.driver.interfaces[0].sched_blocks:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000001)
for k in range(block.interface.tx_queue_count):
if k % len(tb.driver.interfaces[0].sched_blocks) == block.index:
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000003)
else:
await block.schedulers[0].hw_regs.write_dword(4*k, 0x00000000)
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for k, p in enumerate(pkts):
await tb.driver.interfaces[0].start_xmit(p, k % len(tb.driver.interfaces[0].sched_blocks))
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
# assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
for block in tb.driver.interfaces[0].sched_blocks[1:]:
await block.schedulers[0].rb.write_dword(mqnic.MQNIC_RB_SCHED_RR_REG_CTRL, 0x00000000)
tb.log.info("Read statistics counters")
await Timer(2000, 'ns')
lst = []
for k in range(64):
lst.append(await tb.driver.hw_regs.read_dword(0x010000+k*8))
print(lst)
tb.log.info("Test AXI lite interface to application")
await tb.driver.app_hw_regs.write_dword(0, 0x11223344)
print(await tb.driver.app_hw_regs.read_dword(0))
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
@pytest.mark.parametrize(("if_count", "ports_per_if", "axis_pcie_data_width",
"axis_eth_data_width", "axis_eth_sync_data_width"), [
(1, 1, 256, 64, 64),
(2, 1, 256, 64, 64),
(1, 2, 256, 64, 64),
(1, 1, 256, 64, 128),
(1, 1, 512, 64, 64),
(1, 1, 512, 64, 128),
(1, 1, 512, 512, 512),
])
def test_mqnic_core_pcie_us(request, if_count, ports_per_if, axis_pcie_data_width,
axis_eth_data_width, axis_eth_sync_data_width):
dut = "mqnic_core_pcie_us"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, "common", f"{dut}.v"),
os.path.join(rtl_dir, "common", "mqnic_core.v"),
os.path.join(rtl_dir, "common", "mqnic_core_pcie.v"),
os.path.join(rtl_dir, "common", "mqnic_interface.v"),
os.path.join(rtl_dir, "common", "mqnic_interface_tx.v"),
os.path.join(rtl_dir, "common", "mqnic_interface_rx.v"),
os.path.join(rtl_dir, "common", "mqnic_egress.v"),
os.path.join(rtl_dir, "common", "mqnic_ingress.v"),
os.path.join(rtl_dir, "common", "mqnic_l2_egress.v"),
os.path.join(rtl_dir, "common", "mqnic_l2_ingress.v"),
os.path.join(rtl_dir, "common", "mqnic_ptp.v"),
os.path.join(rtl_dir, "common", "mqnic_ptp_clock.v"),
os.path.join(rtl_dir, "common", "mqnic_ptp_perout.v"),
os.path.join(rtl_dir, "common", "cpl_write.v"),
os.path.join(rtl_dir, "common", "cpl_op_mux.v"),
os.path.join(rtl_dir, "common", "desc_fetch.v"),
os.path.join(rtl_dir, "common", "desc_op_mux.v"),
os.path.join(rtl_dir, "common", "event_mux.v"),
os.path.join(rtl_dir, "common", "queue_manager.v"),
os.path.join(rtl_dir, "common", "cpl_queue_manager.v"),
os.path.join(rtl_dir, "common", "tx_fifo.v"),
os.path.join(rtl_dir, "common", "rx_fifo.v"),
os.path.join(rtl_dir, "common", "tx_req_mux.v"),
os.path.join(rtl_dir, "common", "tx_engine.v"),
os.path.join(rtl_dir, "common", "rx_engine.v"),
os.path.join(rtl_dir, "common", "tx_checksum.v"),
os.path.join(rtl_dir, "common", "rx_hash.v"),
os.path.join(rtl_dir, "common", "rx_checksum.v"),
os.path.join(rtl_dir, "common", "stats_counter.v"),
os.path.join(rtl_dir, "common", "stats_collect.v"),
os.path.join(rtl_dir, "common", "stats_pcie_if.v"),
os.path.join(rtl_dir, "common", "stats_pcie_tlp.v"),
os.path.join(rtl_dir, "common", "stats_dma_if_pcie.v"),
os.path.join(rtl_dir, "common", "stats_dma_latency.v"),
os.path.join(rtl_dir, "common", "mqnic_tx_scheduler_block_rr.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"),
os.path.join(rtl_dir, "mqnic_app_block.v"),
os.path.join(eth_rtl_dir, "ptp_clock.v"),
os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"),
os.path.join(eth_rtl_dir, "ptp_perout.v"),
os.path.join(axi_rtl_dir, "axil_crossbar.v"),
os.path.join(axi_rtl_dir, "axil_crossbar_addr.v"),
os.path.join(axi_rtl_dir, "axil_crossbar_rd.v"),
os.path.join(axi_rtl_dir, "axil_crossbar_wr.v"),
os.path.join(axi_rtl_dir, "axil_ram.v"),
os.path.join(axi_rtl_dir, "axil_reg_if.v"),
os.path.join(axi_rtl_dir, "axil_reg_if_rd.v"),
os.path.join(axi_rtl_dir, "axil_reg_if_wr.v"),
os.path.join(axi_rtl_dir, "axil_register_rd.v"),
os.path.join(axi_rtl_dir, "axil_register_wr.v"),
os.path.join(axi_rtl_dir, "arbiter.v"),
os.path.join(axi_rtl_dir, "priority_encoder.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_arb_mux.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_demux.v"),
os.path.join(axis_rtl_dir, "axis_fifo.v"),
os.path.join(axis_rtl_dir, "axis_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_pipeline_fifo.v"),
os.path.join(axis_rtl_dir, "axis_register.v"),
os.path.join(pcie_rtl_dir, "pcie_axil_master.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_demux.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_demux_bar.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_desc_mux.v"),
os.path.join(pcie_rtl_dir, "dma_ram_demux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_ram_demux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_rc.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_rq.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_cc.v"),
os.path.join(pcie_rtl_dir, "pcie_us_if_cq.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
# Structural configuration
parameters['IF_COUNT'] = if_count
parameters['PORTS_PER_IF'] = ports_per_if
parameters['SCHED_PER_IF'] = ports_per_if
# PTP configuration
parameters['PTP_CLOCK_PIPELINE'] = 0
parameters['PTP_USE_SAMPLE_CLOCK'] = 0
parameters['PTP_SEPARATE_RX_CLOCK'] = 0
parameters['PTP_PORT_CDC_PIPELINE'] = 0
parameters['PTP_PEROUT_ENABLE'] = 0
parameters['PTP_PEROUT_COUNT'] = 1
# Queue manager configuration (interface)
parameters['EVENT_QUEUE_OP_TABLE_SIZE'] = 32
parameters['TX_QUEUE_OP_TABLE_SIZE'] = 32
parameters['RX_QUEUE_OP_TABLE_SIZE'] = 32
parameters['TX_CPL_QUEUE_OP_TABLE_SIZE'] = parameters['TX_QUEUE_OP_TABLE_SIZE']
parameters['RX_CPL_QUEUE_OP_TABLE_SIZE'] = parameters['RX_QUEUE_OP_TABLE_SIZE']
parameters['EVENT_QUEUE_INDEX_WIDTH'] = 5
parameters['TX_QUEUE_INDEX_WIDTH'] = 13
parameters['RX_QUEUE_INDEX_WIDTH'] = 8
parameters['TX_CPL_QUEUE_INDEX_WIDTH'] = parameters['TX_QUEUE_INDEX_WIDTH']
parameters['RX_CPL_QUEUE_INDEX_WIDTH'] = parameters['RX_QUEUE_INDEX_WIDTH']
parameters['EVENT_QUEUE_PIPELINE'] = 3
parameters['TX_QUEUE_PIPELINE'] = 3 + max(parameters['TX_QUEUE_INDEX_WIDTH']-12, 0)
parameters['RX_QUEUE_PIPELINE'] = 3 + max(parameters['RX_QUEUE_INDEX_WIDTH']-12, 0)
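    # With TX_QUEUE_INDEX_WIDTH = 13 and RX_QUEUE_INDEX_WIDTH = 8 as set above,
    # these expressions evaluate to 4 and 3 pipeline stages respectively.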
parameters['TX_CPL_QUEUE_PIPELINE'] = parameters['TX_QUEUE_PIPELINE']
parameters['RX_CPL_QUEUE_PIPELINE'] = parameters['RX_QUEUE_PIPELINE']
# TX and RX engine configuration (port)
parameters['TX_DESC_TABLE_SIZE'] = 32
parameters['RX_DESC_TABLE_SIZE'] = 32
# Scheduler configuration (port)
parameters['TX_SCHEDULER_OP_TABLE_SIZE'] = parameters['TX_DESC_TABLE_SIZE']
parameters['TX_SCHEDULER_PIPELINE'] = parameters['TX_QUEUE_PIPELINE']
parameters['TDMA_INDEX_WIDTH'] = 6
# Timestamping configuration (port)
parameters['PTP_TS_ENABLE'] = 1
parameters['TX_PTP_TS_FIFO_DEPTH'] = 32
parameters['RX_PTP_TS_FIFO_DEPTH'] = 32
# Interface configuration (port)
parameters['TX_CHECKSUM_ENABLE'] = 1
parameters['RX_RSS_ENABLE'] = 1
parameters['RX_HASH_ENABLE'] = 1
parameters['RX_CHECKSUM_ENABLE'] = 1
parameters['TX_FIFO_DEPTH'] = 32768
parameters['RX_FIFO_DEPTH'] = 131072
parameters['MAX_TX_SIZE'] = 9214
parameters['MAX_RX_SIZE'] = 9214
parameters['TX_RAM_SIZE'] = 131072
parameters['RX_RAM_SIZE'] = 131072
# Application block configuration
parameters['APP_ENABLE'] = 1
parameters['APP_CTRL_ENABLE'] = 1
parameters['APP_DMA_ENABLE'] = 1
parameters['APP_AXIS_DIRECT_ENABLE'] = 1
parameters['APP_AXIS_SYNC_ENABLE'] = 1
parameters['APP_AXIS_IF_ENABLE'] = 1
parameters['APP_STAT_ENABLE'] = 1
# DMA interface configuration
parameters['DMA_LEN_WIDTH'] = 16
parameters['DMA_TAG_WIDTH'] = 16
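    # The RAM address width below is effectively ceil(log2(max RAM size)):
    # (131072 - 1).bit_length() == 17, so 17 address bits cover both packet RAMs.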
parameters['RAM_ADDR_WIDTH'] = (max(parameters['TX_RAM_SIZE'], parameters['RX_RAM_SIZE'])-1).bit_length()
parameters['RAM_PIPELINE'] = 2
# PCIe interface configuration
parameters['AXIS_PCIE_DATA_WIDTH'] = axis_pcie_data_width
parameters['PF_COUNT'] = 1
parameters['VF_COUNT'] = 0
parameters['PCIE_TAG_COUNT'] = 64
parameters['PCIE_DMA_READ_OP_TABLE_SIZE'] = parameters['PCIE_TAG_COUNT']
parameters['PCIE_DMA_READ_TX_LIMIT'] = 16
parameters['PCIE_DMA_READ_TX_FC_ENABLE'] = 1
parameters['PCIE_DMA_WRITE_OP_TABLE_SIZE'] = 16
parameters['PCIE_DMA_WRITE_TX_LIMIT'] = 3
parameters['PCIE_DMA_WRITE_TX_FC_ENABLE'] = 1
parameters['MSI_COUNT'] = 32
# AXI lite interface configuration (control)
parameters['AXIL_CTRL_DATA_WIDTH'] = 32
parameters['AXIL_CTRL_ADDR_WIDTH'] = 24
parameters['AXIL_CSR_PASSTHROUGH_ENABLE'] = 0
# AXI lite interface configuration (application control)
parameters['AXIL_APP_CTRL_DATA_WIDTH'] = parameters['AXIL_CTRL_DATA_WIDTH']
parameters['AXIL_APP_CTRL_ADDR_WIDTH'] = 24
# Ethernet interface configuration
parameters['AXIS_ETH_DATA_WIDTH'] = axis_eth_data_width
parameters['AXIS_ETH_SYNC_DATA_WIDTH'] = axis_eth_sync_data_width
parameters['AXIS_ETH_RX_USE_READY'] = 0
parameters['AXIS_ETH_TX_PIPELINE'] = 0
parameters['AXIS_ETH_TX_FIFO_PIPELINE'] = 2
parameters['AXIS_ETH_TX_TS_PIPELINE'] = 0
parameters['AXIS_ETH_RX_PIPELINE'] = 0
parameters['AXIS_ETH_RX_FIFO_PIPELINE'] = 2
# Statistics counter subsystem
parameters['STAT_ENABLE'] = 1
parameters['STAT_DMA_ENABLE'] = 1
parameters['STAT_PCIE_ENABLE'] = 1
parameters['STAT_INC_WIDTH'] = 24
parameters['STAT_ID_WIDTH'] = 12
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| 37.916873
| 124
| 0.653382
|
8880ecb98932b24696ef1824785d9ed1a78282e6
| 18,081
|
py
|
Python
|
bddtests/peer/admin_pb2.py
|
a20351766/mchain
|
159b9aebb53257d98528070c2863897e2e610643
|
[
"Apache-2.0"
] | null | null | null |
bddtests/peer/admin_pb2.py
|
a20351766/mchain
|
159b9aebb53257d98528070c2863897e2e610643
|
[
"Apache-2.0"
] | null | null | null |
bddtests/peer/admin_pb2.py
|
a20351766/mchain
|
159b9aebb53257d98528070c2863897e2e610643
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peer/admin.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='peer/admin.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x10peer/admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\"8\n\x0fLogLevelRequest\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t\"9\n\x10LogLevelResponse\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t2\xd5\x02\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12H\n\x11GetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x12H\n\x11SetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x42]\n\"org.hyperledger.mchain.protos.peerB\x0c\x41\x64minPackageZ)github.com/hyperledger/mchain/protos/peerb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SERVERSTATUS_STATUSCODE = _descriptor.EnumDescriptor(
name='StatusCode',
full_name='protos.ServerStatus.StatusCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STARTED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOPPED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PAUSED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=123,
serialized_end=212,
)
_sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE)
_SERVERSTATUS = _descriptor.Descriptor(
name='ServerStatus',
full_name='protos.ServerStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='protos.ServerStatus.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SERVERSTATUS_STATUSCODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=212,
)
_LOGLEVELREQUEST = _descriptor.Descriptor(
name='LogLevelRequest',
full_name='protos.LogLevelRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelRequest.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelRequest.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=214,
serialized_end=270,
)
_LOGLEVELRESPONSE = _descriptor.Descriptor(
name='LogLevelResponse',
full_name='protos.LogLevelResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelResponse.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelResponse.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=272,
serialized_end=329,
)
_SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE
_SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['LogLevelRequest'] = _LOGLEVELREQUEST
DESCRIPTOR.message_types_by_name['LogLevelResponse'] = _LOGLEVELRESPONSE
ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict(
DESCRIPTOR = _SERVERSTATUS,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.ServerStatus)
))
_sym_db.RegisterMessage(ServerStatus)
LogLevelRequest = _reflection.GeneratedProtocolMessageType('LogLevelRequest', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELREQUEST,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelRequest)
))
_sym_db.RegisterMessage(LogLevelRequest)
LogLevelResponse = _reflection.GeneratedProtocolMessageType('LogLevelResponse', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELRESPONSE,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelResponse)
))
_sym_db.RegisterMessage(LogLevelResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.mchain.protos.peerB\014AdminPackageZ)github.com/hyperledger/mchain/protos/peer'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class AdminStub(object):
"""Interface exported by the server.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStatus = channel.unary_unary(
'/protos.Admin/GetStatus',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=ServerStatus.FromString,
)
self.StartServer = channel.unary_unary(
'/protos.Admin/StartServer',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=ServerStatus.FromString,
)
self.StopServer = channel.unary_unary(
'/protos.Admin/StopServer',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=ServerStatus.FromString,
)
self.GetModuleLogLevel = channel.unary_unary(
'/protos.Admin/GetModuleLogLevel',
request_serializer=LogLevelRequest.SerializeToString,
response_deserializer=LogLevelResponse.FromString,
)
self.SetModuleLogLevel = channel.unary_unary(
'/protos.Admin/SetModuleLogLevel',
request_serializer=LogLevelRequest.SerializeToString,
response_deserializer=LogLevelResponse.FromString,
)
class AdminServicer(object):
"""Interface exported by the server.
"""
def GetStatus(self, request, context):
"""Return the serve status.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StartServer(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StopServer(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModuleLogLevel(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetModuleLogLevel(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'StartServer': grpc.unary_unary_rpc_method_handler(
servicer.StartServer,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'StopServer': grpc.unary_unary_rpc_method_handler(
servicer.StopServer,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'GetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
servicer.GetModuleLogLevel,
request_deserializer=LogLevelRequest.FromString,
response_serializer=LogLevelResponse.SerializeToString,
),
'SetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
servicer.SetModuleLogLevel,
request_deserializer=LogLevelRequest.FromString,
response_serializer=LogLevelResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'protos.Admin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaAdminServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Interface exported by the server.
"""
def GetStatus(self, request, context):
"""Return the serve status.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def StartServer(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def StopServer(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def GetModuleLogLevel(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def SetModuleLogLevel(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaAdminStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Interface exported by the server.
"""
def GetStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Return the serve status.
"""
raise NotImplementedError()
GetStatus.future = None
def StartServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
StartServer.future = None
def StopServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
StopServer.future = None
def GetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
GetModuleLogLevel.future = None
def SetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
SetModuleLogLevel.future = None
def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.FromString,
('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.FromString,
('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
}
response_serializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.SerializeToString,
('protos.Admin', 'GetStatus'): ServerStatus.SerializeToString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.SerializeToString,
('protos.Admin', 'StartServer'): ServerStatus.SerializeToString,
('protos.Admin', 'StopServer'): ServerStatus.SerializeToString,
}
method_implementations = {
('protos.Admin', 'GetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.GetModuleLogLevel),
('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus),
('protos.Admin', 'SetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.SetModuleLogLevel),
('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer),
('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.SerializeToString,
('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.SerializeToString,
('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
}
response_deserializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.FromString,
('protos.Admin', 'GetStatus'): ServerStatus.FromString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.FromString,
('protos.Admin', 'StartServer'): ServerStatus.FromString,
('protos.Admin', 'StopServer'): ServerStatus.FromString,
}
cardinalities = {
'GetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
'GetStatus': cardinality.Cardinality.UNARY_UNARY,
'SetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
'StartServer': cardinality.Cardinality.UNARY_UNARY,
'StopServer': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| 42.048837
| 1,170
| 0.741109
|
55931dcdca6f97883a8ec17b8771901898214262
| 5,638
|
py
|
Python
|
image_classification/Shuffle_Transformer/config.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | 1
|
2022-01-12T00:46:53.000Z
|
2022-01-12T00:46:53.000Z
|
image_classification/Shuffle_Transformer/config.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | null | null | null |
image_classification/Shuffle_Transformer/config.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration
Configuration for data, model architecture, training, etc.
Config can be set by a .yaml file or by an ArgumentParser (limited usage)
"""
import os
from yacs.config import CfgNode as CN
import yaml
_C = CN()
_C.BASE = ['']
# data settings
_C.DATA = CN()
_C.DATA.BATCH_SIZE = 8 #1024 batch_size for single GPU
_C.DATA.BATCH_SIZE_EVAL = 8 #1024 batch_size for single GPU
_C.DATA.DATA_PATH = '/dataset/imagenet/' # path to dataset
_C.DATA.DATASET = 'imagenet2012' # dataset name
_C.DATA.IMAGE_SIZE = 224 # input image size
_C.DATA.CROP_PCT = 0.9 # input image scale ratio, scale is applied before centercrop in eval mode
_C.DATA.NUM_WORKERS = 4 # number of data loading threads
_C.DATA.IMAGENET_MEAN = [0.485, 0.456, 0.406] # [0.5, 0.5, 0.5]
_C.DATA.IMAGENET_STD = [0.229, 0.224, 0.225] # [0.5, 0.5, 0.5]
# model settings
_C.MODEL = CN()
_C.MODEL.TYPE = 'ShuffleTransformer'
_C.MODEL.NAME = 'ShuffleTransformer'
_C.MODEL.RESUME = None
_C.MODEL.PRETRAINED = None
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.DROPOUT = 0.0
_C.MODEL.ATTENTION_DROPOUT = 0.0
_C.MODEL.DROP_PATH = 0.1
# transformer settings
_C.MODEL.TRANS = CN()
_C.MODEL.TRANS.PATCH_SIZE = 4 # image_size = patch_size x window_size x num_windows
_C.MODEL.TRANS.WINDOW_SIZE = 7
_C.MODEL.TRANS.IN_CHANNELS = 3
_C.MODEL.TRANS.EMBED_DIM = 96 # same as HIDDEN_SIZE in ViT
_C.MODEL.TRANS.DEPTHS = [2, 2, 6, 2]
_C.MODEL.TRANS.NUM_HEADS = [3, 6, 12, 24]
_C.MODEL.TRANS.MLP_RATIO = 4.
_C.MODEL.TRANS.QKV_BIAS = True
_C.MODEL.TRANS.QK_SCALE = None
_C.MODEL.TRANS.APE = False # absolute positional embeddings
_C.MODEL.TRANS.PATCH_NORM = True
# training settings
_C.TRAIN = CN()
_C.TRAIN.LAST_EPOCH = 0
_C.TRAIN.NUM_EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 0.001
_C.TRAIN.WARMUP_START_LR = 5e-7
_C.TRAIN.END_LR = 5e-6
_C.TRAIN.GRAD_CLIP = 5.0
_C.TRAIN.ACCUM_ITER = 1
_C.TRAIN.LINEAR_SCALED_LR = None
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'warmupcosine'
_C.TRAIN.LR_SCHEDULER.MILESTONES = "30, 60, 90" # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 # only used in StepLRScheduler
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'AdamW'
_C.TRAIN.OPTIMIZER.EPS = 1e-8
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) # for adamW
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# train augmentation
_C.TRAIN.MIXUP_ALPHA = 0.8
_C.TRAIN.CUTMIX_ALPHA = 1.0
_C.TRAIN.CUTMIX_MINMAX = None
_C.TRAIN.MIXUP_PROB = 1.0
_C.TRAIN.MIXUP_SWITCH_PROB = 0.5
_C.TRAIN.MIXUP_MODE = 'batch'
_C.TRAIN.SMOOTHING = 0.1
_C.TRAIN.COLOR_JITTER = 0.4
_C.TRAIN.AUTO_AUGMENT = False #'rand-m9-mstd0.5-inc1'
_C.TRAIN.RAND_AUGMENT = False
_C.TRAIN.RANDOM_ERASE_PROB = 0.25
_C.TRAIN.RANDOM_ERASE_MODE = 'pixel'
_C.TRAIN.RANDOM_ERASE_COUNT = 1
_C.TRAIN.RANDOM_ERASE_SPLIT = False
# misc
_C.SAVE = "./output"
_C.TAG = "default"
_C.SAVE_FREQ = 1 # freq to save checkpoint
_C.REPORT_FREQ = 50 # freq to logging info
_C.VALIDATE_FREQ = 10 # freq to do validation
_C.SEED = 42
_C.EVAL = False # run evaluation only
_C.AMP = False
_C.LOCAL_RANK = 0
_C.NGPUS = -1
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as infile:
yaml_cfg = yaml.load(infile, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('merging config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
"""Update config by ArgumentParser
Args:
args: ArgumentParser contains options
Return:
config: updated config
"""
if args.cfg:
_update_config_from_file(config, args.cfg)
config.defrost()
if args.dataset:
config.DATA.DATASET = args.dataset
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.image_size:
config.DATA.IMAGE_SIZE = args.image_size
if args.num_classes:
config.MODEL.NUM_CLASSES = args.num_classes
if args.data_path:
config.DATA.DATA_PATH = args.data_path
if args.output is not None:
config.SAVE = args.output
if args.ngpus:
config.NGPUS = args.ngpus
if args.eval:
config.EVAL = True
config.DATA.BATCH_SIZE_EVAL = args.batch_size
if args.pretrained:
config.MODEL.PRETRAINED = args.pretrained
if args.resume:
config.MODEL.RESUME = args.resume
if args.last_epoch:
config.TRAIN.LAST_EPOCH = args.last_epoch
if args.amp: # only during training
if config.EVAL is True:
config.AMP = False
else:
config.AMP = True
#config.freeze()
return config
def get_config(cfg_file=None):
"""Return a clone of config or load from yaml file"""
config = _C.clone()
if cfg_file:
_update_config_from_file(config, cfg_file)
return config
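# Illustrative usage sketch (added; the yaml path below is hypothetical):
#
#   from config import get_config
#   cfg = get_config()                                    # defaults defined above
#   cfg = get_config('./configs/shuffle_vit_tiny.yaml')   # defaults merged with a yaml file
#   print(cfg.MODEL.TRANS.EMBED_DIM, cfg.TRAIN.BASE_LR)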
| 30.311828
| 97
| 0.708585
|
02d51cb0fb090c8d6705dca7a4f9d22e784dfabb
| 1,025
|
py
|
Python
|
pygeopkg/core/field.py
|
ropable/pygeopkg
|
92ad57c6cb6372b8210f4d99053d05cde962b797
|
[
"MIT"
] | 4
|
2020-02-16T08:47:40.000Z
|
2021-11-09T09:19:32.000Z
|
pygeopkg/core/field.py
|
ropable/pygeopkg
|
92ad57c6cb6372b8210f4d99053d05cde962b797
|
[
"MIT"
] | 4
|
2021-09-16T02:53:48.000Z
|
2022-02-11T15:08:19.000Z
|
pygeopkg/core/field.py
|
ropable/pygeopkg
|
92ad57c6cb6372b8210f4d99053d05cde962b797
|
[
"MIT"
] | 4
|
2020-12-09T13:31:32.000Z
|
2021-11-09T09:19:39.000Z
|
"""
Field
"""
from pygeopkg.shared.enumeration import SQLFieldTypes
class Field(object):
"""
Field Object for GeoPackage
"""
def __init__(self, name, data_type, size=None):
"""
Initialize the Field class
:param name: The name of the field
:type name: str
:param data_type: The data type of the field
:type data_type: str
        :param size: The size of the field (basically unnecessary in SQLite)
"""
super(Field, self).__init__()
self.name = name
self.data_type = data_type
self.size = size
# End init built-in
def __repr__(self):
"""
String representation
"""
if (self.size and
self.data_type in (SQLFieldTypes.blob, SQLFieldTypes.text)):
return '{0} {1}{2}'.format(self.name, self.data_type, self.size)
return '{0} {1}'.format(self.name, self.data_type)
# End __repr__ built-in
# End Field class
if __name__ == '__main__':
pass
| 25
| 76
| 0.589268
|
fbc7307c040ec1c3faf818f0a54612c4f1e767aa
| 1,757
|
py
|
Python
|
day02/day02_part2.py
|
Nixxen/advent_of_code_2021
|
5dec9eec22272322b75961256d471876437b3597
|
[
"CC0-1.0"
] | null | null | null |
day02/day02_part2.py
|
Nixxen/advent_of_code_2021
|
5dec9eec22272322b75961256d471876437b3597
|
[
"CC0-1.0"
] | null | null | null |
day02/day02_part2.py
|
Nixxen/advent_of_code_2021
|
5dec9eec22272322b75961256d471876437b3597
|
[
"CC0-1.0"
] | null | null | null |
RUN_TEST = False
TEST_SOLUTION = 900
TEST_INPUT_FILE = "test_input_day_02.txt"
INPUT_FILE = "input_day_02.txt"
ARGS = []
def main_part2(
input_file,
):
with open(input_file) as file:
lines = list(map(lambda line: line.rstrip(), file.readlines()))
# Part 2
# down X increases your aim by X units.
# up X decreases your aim by X units.
# forward X does two things:
# It increases your horizontal position by X units.
# It increases your depth by your aim multiplied by X.
# "down" means aiming in the positive direction.
# calculate the horizontal position and depth you would have after
# following the planned course. What do you get if you multiply your
# final horizontal position by your final depth?
instructions = {}
instructions["aim"] = 0
for line in lines:
instruction, steps = line.split(" ")
match instruction:
case "forward":
instructions[instruction] = instructions.setdefault(instruction, 0) + int(steps)
instructions["depth"] = instructions.setdefault("depth", 0) + int(steps) * instructions["aim"]
case "up":
instructions["aim"] = instructions.setdefault("aim", 0) - int(steps)
case "down":
instructions["aim"] = instructions.setdefault("aim", 0) + int(steps)
final_position = instructions["forward"] * instructions["depth"]
solution = final_position
return solution
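# Worked example of the rules above (added): for the course
#   forward 5, down 5, forward 8, up 3, down 8, forward 2
# aim goes 0 -> 5 -> 2 -> 10, horizontal = 5 + 8 + 2 = 15, depth = 0 + 40 + 20 = 60,
# so the answer is 15 * 60 = 900 (the value TEST_SOLUTION expects).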
if __name__ == "__main__":
if RUN_TEST:
solution = main_part2(TEST_INPUT_FILE, *ARGS)
print(solution)
assert TEST_SOLUTION == solution
else:
solution = main_part2(INPUT_FILE, *ARGS)
print(solution)
| 30.824561
| 110
| 0.634035
|
286f8a8e0fb6a278ae5745a0544c0d092b803791
| 1,016
|
py
|
Python
|
pupil_invisible_monitor/src/pupil_invisible_monitor/texture.py
|
JuBepy/Gaze_Mouse
|
4ddea30b4f53deb744dac3f370e7f48baa3b99c2
|
[
"MIT"
] | null | null | null |
pupil_invisible_monitor/src/pupil_invisible_monitor/texture.py
|
JuBepy/Gaze_Mouse
|
4ddea30b4f53deb744dac3f370e7f48baa3b99c2
|
[
"MIT"
] | null | null | null |
pupil_invisible_monitor/src/pupil_invisible_monitor/texture.py
|
JuBepy/Gaze_Mouse
|
4ddea30b4f53deb744dac3f370e7f48baa3b99c2
|
[
"MIT"
] | null | null | null |
import logging
import sys
from pathlib import Path
import numpy as np
from pyglui.cygl.utils import Named_Texture
logger = logging.getLogger(__name__)
class PITextureController:
def _initialize(self):
self._texture = Named_Texture()
self.reset()
def update(self, frame):
if frame.yuv_buffer is not None:
self._texture.update_from_yuv_buffer(
frame.yuv_buffer, frame.width, frame.height
)
self.shape = frame.height, frame.width, 3
else:
self._texture.update_from_ndarray(frame.bgr)
self.shape = frame.bgr.shape
def draw(self):
try:
self._texture.draw()
except AttributeError:
self._initialize()
self._texture.draw()
def reset(self):
placeholder = np.ones((1080, 1088, 3), dtype=np.uint8) * 158
self._texture.update_from_ndarray(placeholder)
self.shape = placeholder.shape
| 26.736842
| 69
| 0.603346
|
eef058aa50d477867dd0ce10805fb68d510d7c91
| 5,563
|
py
|
Python
|
tests/components/switch/test_command_line.py
|
instantchow/home-assistant
|
6797365d4fd74328a0c9e961f652cfb37f48bc7d
|
[
"MIT"
] | null | null | null |
tests/components/switch/test_command_line.py
|
instantchow/home-assistant
|
6797365d4fd74328a0c9e961f652cfb37f48bc7d
|
[
"MIT"
] | null | null | null |
tests/components/switch/test_command_line.py
|
instantchow/home-assistant
|
6797365d4fd74328a0c9e961f652cfb37f48bc7d
|
[
"MIT"
] | null | null | null |
"""the tests for the Command line switch platform."""
import json
import os
import tempfile
import unittest
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.components.switch as switch
from tests.common import get_test_home_assistant
class TestCommandSwitch(unittest.TestCase):
"""Test the command switch."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_state_none(self):
"""Test with none state."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_value(self):
"""Test with state value."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
'value_template': '{{ value=="1" }}'
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_json_value(self):
"""Test with state JSON value."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
oncmd = json.dumps({'status': 'ok'})
offcmd = json.dumps({'status': 'nope'})
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo \'{}\' > {}'.format(oncmd, path),
'offcmd': 'echo \'{}\' > {}'.format(offcmd, path),
'value_template': '{{ value_json.status=="ok" }}'
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_code(self):
"""Test with state code."""
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
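            # With only a state command and no value_template, the state follows
            # the command's exit code; `cat` still succeeds here, so the switch
            # keeps reporting on even after turn_off.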
self.assertEqual(STATE_ON, state.state)
| 35.208861
| 66
| 0.523638
|
b07b27b6faee7d36fa3de5e37bdd76129fb571e9
| 996
|
py
|
Python
|
test/test_kdlehti.py
|
joniok/hybra-medialoader
|
ebc6df58287b5146917d66de69d807309cea4a8f
|
[
"MIT"
] | null | null | null |
test/test_kdlehti.py
|
joniok/hybra-medialoader
|
ebc6df58287b5146917d66de69d807309cea4a8f
|
[
"MIT"
] | 14
|
2016-10-14T15:09:42.000Z
|
2017-03-01T14:29:29.000Z
|
test/test_kdlehti.py
|
joniok/hybra-medialoader
|
ebc6df58287b5146917d66de69d807309cea4a8f
|
[
"MIT"
] | 1
|
2020-11-18T13:06:48.000Z
|
2020-11-18T13:06:48.000Z
|
import sys
import os
import filecmp
import importlib
import datetime
import common
path = os.path.abspath('.')
sys.path.append(path)
domain = 'kdlehti'
url = 'http://www.kdlehti.fi/2016/09/14/lahden-kristillisdemokraatit-vanhusten-hoitajamitoitus-sailytettava-nykyisella-tasolla/'
out = 'test/parser_out.txt'
module = importlib.import_module( 'sites.' + domain )
d = module.parse(url)
class TestParser:
@classmethod
def setup_class(cls):
common.initialise_file( out, d )
def test_file_exists(self):
common.file_exists(out)
def test_file_not_empty(self):
common.file_not_empty(out)
def test_file_contents_match(self):
common.file_contents_match(domain, out)
def test_dictionary_created(self):
common.dictionary_created(d)
def test_dictionary_contains_right_keys(self):
common.dictionary_contains_right_keys(d)
def test_dictionary_values_correct_type(self):
common.dictionary_values_correct_type(d)
| 24.292683
| 128
| 0.73996
|
33a6bcadd5038aeb7ddb1ad837c7d24ca74d6acf
| 5,743
|
py
|
Python
|
bot/decorators.py
|
Ayplow/bot
|
71a3ac9382851845dcb26609d64299bd69b0f0f5
|
[
"MIT"
] | null | null | null |
bot/decorators.py
|
Ayplow/bot
|
71a3ac9382851845dcb26609d64299bd69b0f0f5
|
[
"MIT"
] | null | null | null |
bot/decorators.py
|
Ayplow/bot
|
71a3ac9382851845dcb26609d64299bd69b0f0f5
|
[
"MIT"
] | null | null | null |
import logging
import random
from asyncio import Lock, sleep
from contextlib import suppress
from functools import wraps
from typing import Any, Callable, Container, Optional
from weakref import WeakValueDictionary
from discord import Colour, Embed
from discord.errors import NotFound
from discord.ext import commands
from discord.ext.commands import CheckFailure, Context
from bot.constants import ERROR_REPLIES, RedirectOutput
from bot.utils.checks import with_role_check, without_role_check
log = logging.getLogger(__name__)
class InChannelCheckFailure(CheckFailure):
"""Raised when a check fails for a message being sent in a whitelisted channel."""
def __init__(self, *channels: int):
self.channels = channels
channels_str = ', '.join(f"<#{c_id}>" for c_id in channels)
super().__init__(f"Sorry, but you may only use this command within {channels_str}.")
def in_channel(*channels: int, bypass_roles: Container[int] = None) -> Callable:
"""Checks that the message is in a whitelisted channel or optionally has a bypass role."""
def predicate(ctx: Context) -> bool:
"""In-channel checker predicate."""
if ctx.channel.id in channels:
log.debug(f"{ctx.author} tried to call the '{ctx.command.name}' command. "
f"The command was used in a whitelisted channel.")
return True
if bypass_roles:
if any(r.id in bypass_roles for r in ctx.author.roles):
log.debug(f"{ctx.author} tried to call the '{ctx.command.name}' command. "
f"The command was not used in a whitelisted channel, "
f"but the author had a role to bypass the in_channel check.")
return True
log.debug(f"{ctx.author} tried to call the '{ctx.command.name}' command. "
f"The in_channel check failed.")
raise InChannelCheckFailure(*channels)
return commands.check(predicate)
def with_role(*role_ids: int) -> Callable:
"""Returns True if the user has any one of the roles in role_ids."""
async def predicate(ctx: Context) -> bool:
"""With role checker predicate."""
return with_role_check(ctx, *role_ids)
return commands.check(predicate)
def without_role(*role_ids: int) -> Callable:
"""Returns True if the user does not have any of the roles in role_ids."""
async def predicate(ctx: Context) -> bool:
return without_role_check(ctx, *role_ids)
return commands.check(predicate)
def locked() -> Callable:
"""
Allows the user to only run one instance of the decorated command at a time.
Subsequent calls to the command from the same author are ignored until the command has completed invocation.
This decorator has to go before (below) the `command` decorator.
"""
def wrap(func: Callable) -> Callable:
func.__locks = WeakValueDictionary()
@wraps(func)
async def inner(self: Callable, ctx: Context, *args, **kwargs) -> Optional[Any]:
lock = func.__locks.setdefault(ctx.author.id, Lock())
if lock.locked():
embed = Embed()
embed.colour = Colour.red()
log.debug(f"User tried to invoke a locked command.")
embed.description = (
"You're already using this command. Please wait until it is done before you use it again."
)
embed.title = random.choice(ERROR_REPLIES)
await ctx.send(embed=embed)
return
async with func.__locks.setdefault(ctx.author.id, Lock()):
return await func(self, ctx, *args, **kwargs)
return inner
return wrap
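# Illustrative ordering sketch (added; the command name and body are hypothetical).
# As the docstring notes, `locked()` goes before (below) the `command` decorator:
#
#   @commands.command(name="report")
#   @locked()
#   async def report(self, ctx: Context) -> None:
#       ...  # long-running work; repeat calls by the same author are rejected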
def redirect_output(destination_channel: int, bypass_roles: Container[int] = None) -> Callable:
"""
Changes the channel in the context of the command to redirect the output to a certain channel.
Redirect is bypassed if the author has a role to bypass redirection.
"""
def wrap(func: Callable) -> Callable:
@wraps(func)
async def inner(self: Callable, ctx: Context, *args, **kwargs) -> Any:
if ctx.channel.id == destination_channel:
log.trace(f"Command {ctx.command.name} was invoked in destination_channel, not redirecting")
return await func(self, ctx, *args, **kwargs)
if bypass_roles and any(role.id in bypass_roles for role in ctx.author.roles):
log.trace(f"{ctx.author} has role to bypass output redirection")
return await func(self, ctx, *args, **kwargs)
redirect_channel = ctx.guild.get_channel(destination_channel)
old_channel = ctx.channel
log.trace(f"Redirecting output of {ctx.author}'s command '{ctx.command.name}' to {redirect_channel.name}")
ctx.channel = redirect_channel
await ctx.channel.send(f"Here's the output of your command, {ctx.author.mention}")
await func(self, ctx, *args, **kwargs)
message = await old_channel.send(
f"Hey, {ctx.author.mention}, you can find the output of your command here: "
f"{redirect_channel.mention}"
)
if RedirectOutput.delete_invocation:
await sleep(RedirectOutput.delete_delay)
with suppress(NotFound):
await message.delete()
log.trace("Redirect output: Deleted user redirection message")
with suppress(NotFound):
await ctx.message.delete()
log.trace("Redirect output: Deleted invocation message")
return inner
return wrap
| 40.160839
| 118
| 0.637472
|
671b1025e44870e37e3387ffad2b94748a19e9ba
| 3,096
|
py
|
Python
|
src/sampler.py
|
matan44/deep-RL-trading
|
c3326769d7ff33e28de538594c22096dd72f8d2a
|
[
"MIT"
] | null | null | null |
src/sampler.py
|
matan44/deep-RL-trading
|
c3326769d7ff33e28de538594c22096dd72f8d2a
|
[
"MIT"
] | null | null | null |
src/sampler.py
|
matan44/deep-RL-trading
|
c3326769d7ff33e28de538594c22096dd72f8d2a
|
[
"MIT"
] | null | null | null |
import random, os
import numpy as np
import pandas as pd
class RandomEpisodeSampler:
def __init__(self, train_dates, test_dates, source_path, mode='train'):
self.title = 'random_episode_sampler_eurusd'
self.n_var = 1
self.high_res_window = 80
self.low_res_window = 16
self.window_episode = self.high_res_window + self.low_res_window
self.train_dates = train_dates
self.test_dates = test_dates
self._start_time = None
if mode == 'train':
dates = self.train_dates
elif mode == 'out-of-sample':
dates = self.test_dates
else:
raise ValueError('Unknown mode {}'.format(mode))
self._segments = pd.concat([pd.read_feather(os.path.join(
source_path, '/episodes_v0/{}_{}_{}'.format('eur_usd', date.strftime('%Y%m%d'), '6s_segments')
)) for date in dates]).reset_index(drop=True)
self._min_max = pd.concat([
pd.read_feather(os.path.join(
source_path, '/episodes_v0', '{}_{}_{}'.format('eur_usd', date.strftime('%Y%m%d'), '300s_min_max')
))[['point_1_timestamp', 'point_1_price']]
for date in dates
]).reset_index(drop=True).rename({'point_1_timestamp': 'timestamp', 'point_1_price': 'price'}, axis=1)
self.start_times = self._min_max[
(
(self._min_max.timestamp.dt.hour > 7) |
(
(self._min_max.timestamp.dt.minute > 30) &
(self._min_max.timestamp.dt.hour > 6)
)
) &
(self._min_max.timestamp.dt.hour < 19)
][['timestamp']].copy().reset_index(drop=True)
self.shuffled_start_time_keys = list(range(len(self.start_times)))
random.shuffle(self.shuffled_start_time_keys)
self.start_time_key = None
def get_episode_data(self, start_time):
episode_segments = self._segments[
(self._segments.timestamp <= (start_time + np.timedelta64(12, 'm'))) &
(self._segments.timestamp >= (start_time - np.timedelta64(8, 'm')))
].reset_index(drop=True)
episode_min_max = self._min_max[
(self._min_max.timestamp <= (start_time + np.timedelta64(12, 'm'))) &
(self._min_max.timestamp >= (start_time - np.timedelta64(60, 'm')))
].reset_index(drop=True)
return episode_segments, episode_min_max
def sample(self, start_time_key=None):
if start_time_key is None:
if not self.shuffled_start_time_keys:
self.shuffled_start_time_keys = list(range(len(self.start_times)))
random.shuffle(self.shuffled_start_time_keys)
self.start_time_key = self.shuffled_start_time_keys.pop()
else:
self.start_time_key = start_time_key
self._start_time = self.start_times.loc[self.start_time_key].timestamp
segments, min_max = self.get_episode_data(self._start_time)
return segments, min_max, self._start_time, str(pd.to_datetime(self._start_time))
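# Illustrative usage sketch (added; `train_dates`, `test_dates` and `source_path`
# are placeholders for the caller's own values):
#
#   sampler = RandomEpisodeSampler(train_dates, test_dates, source_path, mode='train')
#   segments, min_max, start_time, label = sampler.sample()                  # random episode
#   segments, min_max, start_time, label = sampler.sample(start_time_key=0)  # fixed episode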
| 44.228571
| 114
| 0.61531
|
57cf61b68c08df5ee698cc6a519ee2ba87ea24f0
| 6,023
|
py
|
Python
|
tests/integration/teams/test_nfl_integration.py
|
ajhodges/sportsreference
|
8cb38db3ff2c38806cb4a68482208fd5e0f3084a
|
[
"MIT"
] | null | null | null |
tests/integration/teams/test_nfl_integration.py
|
ajhodges/sportsreference
|
8cb38db3ff2c38806cb4a68482208fd5e0f3084a
|
[
"MIT"
] | null | null | null |
tests/integration/teams/test_nfl_integration.py
|
ajhodges/sportsreference
|
8cb38db3ff2c38806cb4a68482208fd5e0f3084a
|
[
"MIT"
] | null | null | null |
import mock
import os
import pandas as pd
import pytest
from flexmock import flexmock
from sportsreference import utils
from sportsreference.nfl.constants import SEASON_PAGE_URL
from sportsreference.nfl.teams import Teams
MONTH = 9
YEAR = 2017
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'nfl_stats', filename)
return open('%s' % filepath, 'r', encoding='utf8').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
def __call__(self, div):
if div == 'div#all_team_stats':
return read_file('%s_all_team_stats.html' % YEAR)
elif div == 'table#AFC':
return read_file('%s_afc.html' % YEAR)
else:
return read_file('%s_nfc.html' % YEAR)
html_contents = read_file('%s.html' % YEAR)
return MockPQ(html_contents)
def mock_request(url):
class MockRequest:
def __init__(self, html_contents, status_code=200):
self.status_code = status_code
self.html_contents = html_contents
self.text = html_contents
if str(YEAR) in url:
return MockRequest('good')
else:
return MockRequest('bad', status_code=404)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNFLIntegration:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'rank': 6,
'abbreviation': 'KAN',
'name': 'Kansas City Chiefs',
'wins': 10,
'losses': 6,
'win_percentage': .625,
'games_played': 16,
'points_for': 415,
'points_against': 339,
'points_difference': 76,
'margin_of_victory': 4.8,
'strength_of_schedule': -1.3,
'simple_rating_system': 3.4,
'offensive_simple_rating_system': 3.8,
'defensive_simple_rating_system': -0.3,
'yards': 6007,
'plays': 985,
'yards_per_play': 6.1,
'turnovers': 11,
'fumbles': 3,
'first_downs': 322,
'pass_completions': 363,
'pass_attempts': 543,
'pass_yards': 4104,
'pass_touchdowns': 26,
'interceptions': 8,
'pass_net_yards_per_attempt': 7.1,
'pass_first_downs': 198,
'rush_attempts': 405,
'rush_yards': 1903,
'rush_touchdowns': 12,
'rush_yards_per_attempt': 4.7,
'rush_first_downs': 95,
'penalties': 118,
'yards_from_penalties': 1044,
'first_downs_from_penalties': 29,
'percent_drives_with_points': 44.9,
'percent_drives_with_turnovers': 6.3,
'points_contributed_by_offense': 115.88
}
self.abbreviations = [
'RAM', 'NWE', 'PHI', 'NOR', 'JAX', 'KAN', 'DET', 'PIT', 'RAV',
'MIN', 'SEA', 'CAR', 'SDG', 'DAL', 'ATL', 'WAS', 'HTX', 'TAM',
'OTI', 'SFO', 'GNB', 'BUF', 'RAI', 'NYJ', 'CRD', 'CIN', 'DEN',
'MIA', 'CHI', 'CLT', 'NYG', 'CLE'
]
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.teams = Teams()
def test_nfl_integration_returns_correct_number_of_teams(self):
assert len(self.teams) == len(self.abbreviations)
def test_nfl_integration_returns_correct_attributes_for_team(self):
kansas = self.teams('KAN')
for attribute, value in self.results.items():
assert getattr(kansas, attribute) == value
def test_nfl_integration_returns_correct_team_abbreviations(self):
for team in self.teams:
assert team.abbreviation in self.abbreviations
def test_nfl_integration_dataframe_returns_dataframe(self):
df = pd.DataFrame([self.results], index=['KAN'])
kansas = self.teams('KAN')
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, kansas.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_nfl_integration_all_teams_dataframe_returns_dataframe(self):
result = self.teams.dataframes.drop_duplicates(keep=False)
assert len(result) == len(self.abbreviations)
assert set(result.columns.values) == set(self.results.keys())
def test_nfl_invalid_team_name_raises_value_error(self):
with pytest.raises(ValueError):
self.teams('INVALID_NAME')
def test_nfl_empty_page_returns_no_teams(self):
flexmock(utils) \
.should_receive('_no_data_found') \
.once()
flexmock(utils) \
.should_receive('_get_stats_table') \
.and_return(None)
teams = Teams()
assert len(teams) == 0
class TestNFLIntegrationInvalidYear:
@mock.patch('requests.get', side_effect=mock_pyquery)
@mock.patch('requests.head', side_effect=mock_request)
def test_invalid_default_year_reverts_to_previous_year(self,
*args,
**kwargs):
flexmock(utils) \
.should_receive('_find_year_for_season') \
.and_return(2018)
teams = Teams()
for team in teams:
assert team._year == '2017'
| 33.648045
| 78
| 0.588909
|
df4dea355fb0b623cb6164b6745d0eaee2f4d500
| 1,876
|
py
|
Python
|
insertion_sort.py
|
shubham3121/algorithms
|
476d13ce6550f3cc500e161e2dba23bde0d576b0
|
[
"MIT"
] | null | null | null |
insertion_sort.py
|
shubham3121/algorithms
|
476d13ce6550f3cc500e161e2dba23bde0d576b0
|
[
"MIT"
] | null | null | null |
insertion_sort.py
|
shubham3121/algorithms
|
476d13ce6550f3cc500e161e2dba23bde0d576b0
|
[
"MIT"
] | null | null | null |
class BinaryInsertionSort:
"""
    Binary insertion sort performs the search and compare
    operation using binary search. Thus the search and compare operation
    takes O(log(n)) time, while the swaps take O(n) time.
Time Complexity: O(n*(log(n) + n)) ≈ O(n^2)
"""
def __init__(self, array):
self.array = array
self.size = len(array)
def sort(self):
"""
Sorts the elements of the array in ascending order
Time complexity: O(n^2)
Space complexity: O(1)
        :return: the sorted array
"""
if self.size < 2:
return self.array
for i in range(1, self.size):
# search and compare function
idx = self._search_lower_element_idx(key=i)
# swap function
self._swap_elements(src=i, dest=idx)
return self.array
def _swap_elements(self, src, dest):
"""
Swaps all elements in reverse order i.e. starting from src
index till the destination index.
Time Complexity: O(n)
"""
while src > dest:
if self.array[src] < self.array[src-1]:
temp = self.array[src]
self.array[src] = self.array[src - 1]
self.array[src - 1] = temp
src = src - 1
def _search_lower_element_idx(self, key):
"""
Search for the index in the array[0, key-1], whose value is
less than the value at the key index of the array.
Compare & search Time Complexity: O(log(n))
"""
start, end = 0, len(self.array[:key-1])
mid = (start + end) // 2
while start < end:
if self.array[mid] > self.array[key]:
end = mid
else:
return mid + 1
mid = (start + end) // 2
return mid
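# Illustrative usage (added):
#
#   sorter = BinaryInsertionSort([5, 2, 9, 1, 6])
#   print(sorter.sort())   # -> [1, 2, 5, 6, 9]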
| 26.055556
| 72
| 0.533049
|
6febdccbf1e625417b99f1761984ad661da60dc4
| 4,391
|
py
|
Python
|
robot_tasks/scripts/c3_ground.py
|
grvcTeam/mbzirc2020
|
e474cee6458a3bb14f858da43349c5154b3c8cdd
|
[
"MIT"
] | null | null | null |
robot_tasks/scripts/c3_ground.py
|
grvcTeam/mbzirc2020
|
e474cee6458a3bb14f858da43349c5154b3c8cdd
|
[
"MIT"
] | null | null | null |
robot_tasks/scripts/c3_ground.py
|
grvcTeam/mbzirc2020
|
e474cee6458a3bb14f858da43349c5154b3c8cdd
|
[
"MIT"
] | 2
|
2021-01-01T06:32:04.000Z
|
2021-06-28T09:09:16.000Z
|
#!/usr/bin/env python
# PARAMETER SPECIFICATION
# Number of UAVs per team:                Maximum of 3
# Number of UGVs per team:                1
# Arena size:                             50m x 60m x 20m
# Tower height:                           Maximum of 18m
# Tower base and interior specifications: TBD
# Location of simulated fires:            Up to 16m in height, inside the arena.
# Environment:                            Outdoor and indoor
# Mode of operation:                      Autonomous; manual allowed but penalized
# RTK/DGPS positioning:                   Allowed but penalized
# Challenge duration:                     20 Minutes
# Communications:                         TBD
# Extinguisher types considered:          Water carrying container 1-3 liters. Some examples shown in Figure 2.
# Maximum size of UAV:                    1.2m x 1.2m x 0.5m
# Maximum size of UGV:                    1.7m x 1.5m x 2m
# import copy
import math
import rospy
import smach
import yaml
import mbzirc_comm_objs.msg as msg
from geometry_msgs.msg import PoseStamped
from tasks.move import TakeOff, GoTo, Land
from tasks.fire import ExtinguishGroundFire
from std_srvs.srv import Trigger, TriggerResponse
from utils.manager import TaskManager
from utils.robot import RobotProxy
# TODO: All these parameters from config!
field_width = 60
field_height = 50
column_count = 4 # 6 # TODO: as a function of fov
class CentralUnit(object):
def __init__(self):
# Read parameters
conf_file = rospy.get_param('~conf_file', 'config/conf_ch3_ground.yaml')
self.robot_id = rospy.get_param('~uav_id', '6')
self.height = rospy.get_param('~height', 3.0)
with open(conf_file,'r') as file:
ground_fires_conf = yaml.safe_load(file)
self.path = []
if 'path' in ground_fires_conf:
for waypoint in ground_fires_conf['path']:
wp = PoseStamped()
wp.header.frame_id = 'arena'
wp.pose.position.x = waypoint[0]
wp.pose.position.y = waypoint[1]
wp.pose.position.z = self.height
wp.pose.orientation.x = 0
wp.pose.orientation.y = 0
wp.pose.orientation.z = 0
wp.pose.orientation.w = 0
self.path.append(wp)
else:
rospy.logerr('No path defined in config file')
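        # Expected conf_file layout (added note, inferred from the parsing above):
        #   path:
        #     - [x0, y0]
        #     - [x1, y1]
        # Each [x, y] pair becomes an 'arena'-frame waypoint flown at self.height.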
self.robots = {}
self.robots[self.robot_id] = RobotProxy(self.robot_id)
self.task_manager = TaskManager(self.robots)
# Start challenge service
self.start_challenge_service = rospy.Service('start_c3_ground',Trigger,self.start_challenge)
self.started = False
def start_challenge(self, req):
rospy.loginfo('Starting challenge 3 ground.')
self.started = True
return TriggerResponse(True,'')
# TODO: Could be a smach.State (for all or for every single uav)
def take_off(self):
userdata = smach.UserData()
userdata.height = self.height
self.task_manager.start_task(self.robot_id, TakeOff(), userdata)
self.task_manager.wait_for([self.robot_id])
def extinguish_ground_fire(self):
userdata = smach.UserData()
userdata.path = self.path
userdata.color = 'fire'
self.task_manager.start_task(self.robot_id, ExtinguishGroundFire(), userdata)
self.task_manager.wait_for([self.robot_id])
def go_home(self):
# Go to start pose at 6 meters -> Now go to safe pose
userdata_goto = smach.UserData()
userdata_goto.waypoint = PoseStamped()
#userdata_goto.waypoint = self.path[0]
userdata_goto.waypoint.pose.position.x = -6.0
userdata_goto.waypoint.pose.position.y = 3.0
userdata_goto.waypoint.pose.position.z = 6.0
self.task_manager.start_task(self.robot_id, GoTo(), userdata_goto)
self.task_manager.wait_for([self.robot_id])
# Land
# userdata_land = smach.UserData()
# self.task_manager.start_task(self.robot_id, Land(), userdata_land)
# self.task_manager.wait_for([self.robot_id])
def main():
rospy.init_node('central_unit_c3_ground')
while rospy.get_rostime() == rospy.Time():
rospy.logwarn("Waiting for (sim) time to begin!")
rospy.sleep(1)
central_unit = CentralUnit()
rospy.loginfo('Batamanta ready to go.')
while not rospy.is_shutdown() and not central_unit.started:
rospy.sleep(1)
central_unit.take_off()
central_unit.extinguish_ground_fire()
central_unit.go_home()
rospy.loginfo('Finished!')
if __name__ == '__main__':
main()
| 34.03876
| 101
| 0.669779
|
0b6fc0f9cb2eb80279a67d48d538a424350c932e
| 3,540
|
py
|
Python
|
EDGAR_Pac.py
|
ruicong-li/ruicong-li.github.io
|
774069e9e5f29d29bfc500a008bcfaf36bef3cc2
|
[
"MIT"
] | null | null | null |
EDGAR_Pac.py
|
ruicong-li/ruicong-li.github.io
|
774069e9e5f29d29bfc500a008bcfaf36bef3cc2
|
[
"MIT"
] | null | null | null |
EDGAR_Pac.py
|
ruicong-li/ruicong-li.github.io
|
774069e9e5f29d29bfc500a008bcfaf36bef3cc2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Utility programs for accessing SEC/EDGAR
ND-SRAF / McDonald : 201606
https://sraf.nd.edu
"""
def download_masterindex(year, qtr, flag=False):
# Download Master.idx from EDGAR
# Loop accounts for temporary server/ISP issues
# ND-SRAF / McDonald : 201606
import time
from urllib.request import urlopen
from zipfile import ZipFile
from io import BytesIO
number_of_tries = 10
    sleep_time = 10 # Note: sleep time doubles after each failed attempt
PARM_ROOT_PATH = 'https://www.sec.gov/Archives/edgar/full-index/'
start = time.clock() # Note: using clock time not CPU
masterindex = []
# using the zip file is a little more complicated but orders of magnitude faster
append_path = str(year) + '/QTR' + str(qtr) + '/master.zip' # /master.idx => nonzip version
sec_url = PARM_ROOT_PATH + append_path
for i in range(1, number_of_tries + 1):
try:
zipfile = ZipFile(BytesIO(urlopen(sec_url).read()))
records = zipfile.open('master.idx').read().decode('utf-8', 'ignore').splitlines()[10:]
# records = urlopen(sec_url).read().decode('utf-8').splitlines()[10:] # => nonzip version
break
except Exception as exc:
            if i == 1:
                print('\nError in download_masterindex')
            print('  {0}. _url: {1}'.format(i, sec_url))
            print('  Warning: {0} [{1}]'.format(str(exc), time.strftime('%c')))
if '404' in str(exc):
break
if i == number_of_tries:
return False
print(' Retry in {0} seconds'.format(sleep_time))
time.sleep(sleep_time)
sleep_time += sleep_time
# Load m.i. records into masterindex list
for line in records:
mir = MasterIndexRecord(line)
if not mir.err:
masterindex.append(mir)
if flag:
print('download_masterindex: ' + str(year) + ':' + str(qtr) + ' | ' +
'len() = {:,}'.format(len(masterindex)) + ' | Time = {0:.4f}'.format(time.clock() - start) +
' seconds')
return masterindex
class MasterIndexRecord:
def __init__(self, line):
self.err = False
parts = line.split('|')
if len(parts) == 5:
self.cik = int(parts[0])
self.name = parts[1]
self.form = parts[2]
self.filingdate = int(parts[3].replace('-', ''))
self.path = parts[4]
else:
self.err = True
return
def edgar_server_not_available(flag=False):
# routine to run download only when EDGAR server allows bulk download.
# see: https://www.sec.gov/edgar/searchedgar/ftpusers.htm
# local time is converted to EST for check
from datetime import datetime as dt
import pytz
import time
SERVER_BGN = 21 # Server opens at 9:00PM EST
SERVER_END = 6 # Server closes at 6:00AM EST
# Get UTC time from local and convert to EST
utc_dt = pytz.utc.localize(dt.utcnow())
est_timezone = pytz.timezone('US/Eastern')
est_dt = est_timezone.normalize(utc_dt.astimezone(est_timezone))
if est_dt.hour >= SERVER_BGN or est_dt.hour < SERVER_END:
return False
else:
if flag:
print('\rPeak hour, reduce traffic: ' + str(dt.now()), end='')
# time.sleep(600) # Sleep for 10 minutes
return True
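# Illustrative usage sketch (added; not part of the original script):
#
#   if not edgar_server_not_available(flag=True):
#       index = download_masterindex(2016, 1, flag=True)
#       if index:
#           rec = index[0]
#           print(rec.cik, rec.form, rec.filingdate, rec.path)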
| 33.084112
| 107
| 0.574859
|
7a5cac390cee4776de18c9ffaee65b7b389434e0
| 1,241
|
py
|
Python
|
2.Linked-List/8.get-node-value.py
|
RocqJones/data-structures-Hackerank-problem-solving
|
2545c2eb05d21c16c8bc18d65fbba536a40846fd
|
[
"Apache-2.0"
] | null | null | null |
2.Linked-List/8.get-node-value.py
|
RocqJones/data-structures-Hackerank-problem-solving
|
2545c2eb05d21c16c8bc18d65fbba536a40846fd
|
[
"Apache-2.0"
] | null | null | null |
2.Linked-List/8.get-node-value.py
|
RocqJones/data-structures-Hackerank-problem-solving
|
2545c2eb05d21c16c8bc18d65fbba536a40846fd
|
[
"Apache-2.0"
] | null | null | null |
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node):
while node:
print(str(node.data))
node = node.next
# Complete the getNode function below.
# For your reference:
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
def getNode(head, positionFromTail):
my_list = []
while head:
my_list.append(head.data)
head = head.next
    # return my_list[-(positionFromTail + 1)]
print(my_list[-(positionFromTail + 1)])
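# Example (added): for the list 1 -> 2 -> 3 with positionFromTail = 0, my_list is
# [1, 2, 3], so my_list[-(0 + 1)] == my_list[-1] prints 3, the tail's value.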
if __name__ == '__main__':
tests = int(input())
for tests_itr in range(tests):
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
position = int(input())
getNode(llist.head, position)
| 21.396552
| 46
| 0.607575
|
2239e375d035a6cf1a3ee34042d97f9aa7f86a60
| 5,976
|
py
|
Python
|
app.py
|
ErikKalkoken/slack-demo-heroku
|
6142f587b696da16613f9204300e6a71b610818f
|
[
"MIT"
] | 1
|
2021-10-13T09:34:29.000Z
|
2021-10-13T09:34:29.000Z
|
app.py
|
ErikKalkoken/slack-demo-heroku
|
6142f587b696da16613f9204300e6a71b610818f
|
[
"MIT"
] | 4
|
2019-07-30T00:18:38.000Z
|
2021-10-12T08:32:53.000Z
|
app.py
|
ErikKalkoken/slack-demo-heroku
|
6142f587b696da16613f9204300e6a71b610818f
|
[
"MIT"
] | null | null | null |
# main module with the Slack app
import os
import slack
from flask import Flask, json, request
import psycopg2
# database connection
DATABASE_URL = os.environ['DATABASE_URL']
# setting oauth client parameters
client_id = os.environ["SLACK_CLIENT_ID"]
client_secret = os.environ["SLACK_CLIENT_SECRET"]
oauth_scope = "commands,users:read"
class SlackTeam:
"""A slack team with its token
Public properties:
id: team ID
name: team name
token: Slack token
"""
def __init__(self, id, name, token):
self._id = id[:64]
self._name = name[:255]
self._token = token[:255]
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def token(self):
return self._token
def store(self, connection):
        Stores the current object to the database. Will overwrite an existing record.
Args:
connection: current postgres connection
Exceptions:
on any error
"""
try:
cursor = connection.cursor()
sql_query = """INSERT INTO slack_teams (id, name, token)
VALUES (%s, %s, %s)
ON CONFLICT (id)
DO UPDATE SET name=%s, token=%s"""
record = (self._id, self._name, self._token, self._name, self._token)
cursor.execute(sql_query, record)
connection.commit()
except (Exception, psycopg2.Error) as error:
print("WARN: Failed to insert record into table", error)
raise error
finally:
#closing cursor
if (cursor):
cursor.close()
@staticmethod
def fetchFromDb(connection, id):
"""fetches an object from database by its team ID
Args:
connection: current postgres connection
id: team ID of object to be fetched
Returns:
the SlackTeam object when found or None if not found
Exceptions:
on any error
"""
try:
cursor = connection.cursor()
sql_query = """SELECT id, name, token
FROM slack_teams
WHERE id = %s"""
cursor.execute(sql_query, (id,))
record = cursor.fetchone()
if (record == None):
print(f"WARN: Could not find a team for ID: {id}")
obj = None
else:
obj = SlackTeam(record[0], record[1], record[2])
except (Exception, psycopg2.Error) as error :
print("Error while fetching data from PostgreSQL", error)
raise
finally:
#closing database connection.
if(cursor):
cursor.close()
return obj
# flask app
app = Flask(__name__)
@app.route("/", methods=["GET"])
def pre_install():
"""shows the 'Add to Slack' link that will start the the oauth process"""
url = f'https://slack.com/oauth/authorize?scope={ oauth_scope }&client_id={ client_id }'
html = f'<a href="{ url }">Add to Slack</a>'
return html
@app.route("/finish_auth", methods=["GET", "POST"])
def post_install():
"""Exchange to oauth code with a token and store it"""
try:
# Retrieve the auth code from the request params
auth_code = request.args['code']
# An empty string is a valid token for this request
client = slack.WebClient(token="")
# Request the auth tokens from Slack
api_response = client.oauth_access(
client_id=client_id,
client_secret=client_secret,
code=auth_code
)
team_id = api_response["team_id"]
team_name = api_response["team_name"]
access_token = api_response["access_token"]
# store the received token to our DB for later use
connection = psycopg2.connect(DATABASE_URL)
team = SlackTeam(
team_id,
team_name,
access_token
)
team.store(connection)
connection.close()
http = 'Installation completed for workspace ' + team_name
    except (Exception, psycopg2.Error) as error:
        print("ERROR: ", error)
        # team_name may be unbound if the OAuth exchange itself failed
        http = 'Installation failed'
return http
@app.route('/slash', methods=['POST'])
def slash_response():
"""endpoint for receiving all slash command requests from Slack"""
try:
# get token for current workspace
team_id = request.form.get("team_id")
connection = psycopg2.connect(DATABASE_URL)
team = SlackTeam.fetchFromDb(
connection,
team_id
)
connection.close()
if team is None:
raise RuntimeError(
"This app is not properly installed for team " + team_id
)
# get real name of current user from Slack API
user_id = request.form.get("user_id")
print(team.token)
client = slack.WebClient(token=team.token)
api_response = client.users_info(user=user_id)
assert(api_response["ok"])
user_info = api_response["user"]
if "real_name" in user_info:
user_name = user_info["real_name"]
else:
user_name = user_info["name"]
# create response
response = json.jsonify({
"text": "Hi " + user_name + "! How is it going?"
})
except Exception as error:
print("ERROR: ", error)
response = "An internal error occurred"
## respond to user
return response
# to run this flask app locally
if __name__ == '__main__':
app.run(debug=True, port=8000)
| 29.009709
| 92
| 0.552209
|
6d95a3182b67f0d67b45b967e8c53df157c57bf4
| 349
|
py
|
Python
|
examples/get_process_instance.py
|
yogesh-selfscore/camunda-external-task-client-python3
|
ce339909a02d4e229bcb193666240598cdd0a8e5
|
[
"Apache-2.0"
] | 25
|
2020-07-02T13:15:37.000Z
|
2022-02-01T16:48:01.000Z
|
examples/get_process_instance.py
|
yogesh-selfscore/camunda-external-task-client-python3
|
ce339909a02d4e229bcb193666240598cdd0a8e5
|
[
"Apache-2.0"
] | 23
|
2021-07-29T03:49:58.000Z
|
2022-03-07T05:14:16.000Z
|
examples/get_process_instance.py
|
yogesh-selfscore/camunda-external-task-client-python3
|
ce339909a02d4e229bcb193666240598cdd0a8e5
|
[
"Apache-2.0"
] | 14
|
2021-08-19T07:26:01.000Z
|
2022-02-24T14:08:17.000Z
|
from camunda.client.engine_client import EngineClient
def main():
client = EngineClient()
resp_json = client.get_process_instance("PARALLEL_STEPS_EXAMPLE", ["intVar_eq_1", "strVar_eq_hello"],
["6172cdf0-7b32-4460-9da0-ded5107aa977"])
print(resp_json)
if __name__ == '__main__':
main()
| 26.846154
| 105
| 0.641834
|
5f63bf462cd704dd89291762b10fb1c09382f468
| 2,178
|
py
|
Python
|
lover/__main__.py
|
jerluc/lover
|
3177db0823dbf8518d0c87c00f918504cf1c953f
|
[
"Apache-2.0"
] | 11
|
2016-08-07T00:38:34.000Z
|
2017-04-17T04:48:20.000Z
|
lover/__main__.py
|
jerluc/lover
|
3177db0823dbf8518d0c87c00f918504cf1c953f
|
[
"Apache-2.0"
] | 9
|
2016-07-21T19:12:03.000Z
|
2016-10-23T23:39:11.000Z
|
lover/__main__.py
|
jerluc/lover
|
3177db0823dbf8518d0c87c00f918504cf1c953f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os.path
import sys
import click
import lover
from lover.env import Env
import lover.commands as commands
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
CWD = os.path.abspath(os.getcwd())
@click.group(chain=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(version=lover.version, prog_name='lover')
def cli():
pass
@cli.command('new', short_help='Creates a new LOVE project')
@click.argument('project_dir', default=CWD)
@click.option('-r', '--recreate', is_flag=True,
help='Recreates lover metafiles and LOVE binaries')
def new(project_dir, recreate):
"""Creates a new LOVE project
If PROJECT_DIR is provided, the new project will be created there.
Otherwise, the new configuration and project files will be generated
in the current directory."""
env = Env(project_dir, autocreate=True)
sys.exit(commands.new(env, recreate))
@cli.command('run', short_help='Runs your project')
def run():
"""Runs your project
Loads the project configuration from the current directory and runs
the project.
    If the LOVE version specified is not currently available on your
system, lover will automatically download the version specified in
your project configuration before attempting to run."""
env = Env(CWD)
sys.exit(commands.run(env))
@cli.command('dist', short_help='Packages your project for distribution')
@click.option('-t', '--target', multiple=True, type=str, metavar='TARGET',
help='Specifies an output target platform')
def dist(target):
"""Packages your project for distribution
By default, this will load the project configuration from the
current directory and create packaged distros for each target
platform configured.
To package additional targets that have not been preconfigured,
specify multiple TARGETs during invocation.
If any LOVE binary is not currently available for the configured
target platforms, they will be automatically downloaded during this
step before attempting to package."""
env = Env(CWD)
sys.exit(commands.dist(env, target))
def main():
cli()
| 30.676056
| 74
| 0.724518
|
cb740a3c7bea47df908e5947afc106bc8f3a8b2f
| 2,355
|
py
|
Python
|
src/sudoku_solver.py
|
Adi-Shavit/PySudokuSolver
|
b984c434f6106d3f8c73288dfb0d84b33656138a
|
[
"MIT"
] | null | null | null |
src/sudoku_solver.py
|
Adi-Shavit/PySudokuSolver
|
b984c434f6106d3f8c73288dfb0d84b33656138a
|
[
"MIT"
] | null | null | null |
src/sudoku_solver.py
|
Adi-Shavit/PySudokuSolver
|
b984c434f6106d3f8c73288dfb0d84b33656138a
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from src.algorithm_x import ExactCoverSolver
from src.exact_cover import create_constraint_board
from src.quad_linked_list import QuadNode
def solve(unsolved_board: List[List[int]]) -> Optional[List[List[int]]]:
constraint_board: List[List[bool]] = create_constraint_board(len(unsolved_board[0]))
solver = ExactCoverSolver(constraint_board)
cover_initial_state(unsolved_board, solver)
solution = solver.algorithm_x()
if solution:
return convert_to_sudoku(solution, len(unsolved_board[0]))
else:
return None
def convert_to_sudoku(solution_matrix: List[QuadNode], dimensions) -> List[List[int]]:
"""
This function converts the list of exact cover row nodes to an actual sudoku board.
"""
    solution_board: List[List[Optional[int]]] = [[None for _ in range(dimensions)] for _ in range(dimensions)]
for node in solution_matrix:
solution_board[(node.payload // (dimensions ** 2))][(node.payload // dimensions) % dimensions] = (
node.payload % dimensions) + 1
return solution_board
def cover_initial_state(unsolved_board: List[List[int]], solver: ExactCoverSolver):
"""
This function covers the initial states of the Sudoku board.
"""
def get_row_node(number, board_row, board_column) -> QuadNode:
"""
Given the number, the row in the board, and the column in the board, this function navigates to the correct
row in the exact cover QuadLinkedList and returns the node so that it can be initially selected.
"""
start_node = solver.sparse_list.head_node.right_node
for _ in range(board_row * len(unsolved_board[0])):
start_node = start_node.right_node
for _ in range(board_column):
start_node = start_node.right_node
for _ in range(number):
start_node = start_node.bottom_node
return start_node
initial_nodes = []
for row_index, row in enumerate(unsolved_board):
for column, num in enumerate(row):
if 1 <= num <= 9:
initial_nodes.append(get_row_node(num, row_index, column))
for node in initial_nodes:
solver.select_initial_node(node)
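# Hedged usage sketch (not part of the original module): it assumes a standard
# 9x9 puzzle where 0 marks an empty cell (any value outside 1..9 is treated as
# unset by cover_initial_state) and that src.exact_cover / src.algorithm_x are
# importable as above. The puzzle below is illustrative only.
if __name__ == '__main__':
    example_board = [
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ]
    solved = solve(example_board)
    if solved is None:
        print("No solution found")
    else:
        for solved_row in solved:
            print(solved_row)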
| 37.380952
| 147
| 0.659873
|
c5b07cdf1130387478e623d8cd317d75d826dee3
| 1,011
|
py
|
Python
|
testing/tests/001-main/001-empty/002-authenticated/004-config.py
|
piwaniuk/critic
|
28ed20bb8032d7cc5aa23de98da51e619fd84164
|
[
"Apache-2.0"
] | 216
|
2015-01-05T12:48:10.000Z
|
2022-03-08T00:12:23.000Z
|
testing/tests/001-main/001-empty/002-authenticated/004-config.py
|
piwaniuk/critic
|
28ed20bb8032d7cc5aa23de98da51e619fd84164
|
[
"Apache-2.0"
] | 55
|
2015-02-28T12:10:26.000Z
|
2020-11-18T17:45:16.000Z
|
testing/tests/001-main/001-empty/002-authenticated/004-config.py
|
piwaniuk/critic
|
28ed20bb8032d7cc5aa23de98da51e619fd84164
|
[
"Apache-2.0"
] | 34
|
2015-05-02T15:15:10.000Z
|
2020-06-15T19:20:37.000Z
|
with frontend.signin():
frontend.page(
"config",
expect={
"document_title": testing.expect.document_title(u"User preferences"),
"content_title": testing.expect.paleyellow_title(0, u"User preferences"),
"pageheader_links": testing.expect.pageheader_links("authenticated",
"administrator"),
"script_user": testing.expect.script_user(instance.user("admin"))
})
frontend.page(
"config",
params={ "defaults": "yes" },
expect={
"document_title": testing.expect.document_title(u"User preferences"),
"content_title": testing.expect.paleyellow_title(0, u"User preferences"),
"pageheader_links": testing.expect.pageheader_links("authenticated",
"administrator"),
"script_user": testing.expect.script_user(instance.user("admin"))
})
| 45.954545
| 85
| 0.554896
|
bea3996136cc113df8d77d5cbf17a39aefb98460
| 127
|
py
|
Python
|
python/testData/inspections/ReplaceNotEqOperator.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/ReplaceNotEqOperator.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/ReplaceNotEqOperator.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
print(<warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support <>, use != instead">a<caret> <> b</warning>)
| 127
| 127
| 0.629921
|
d0354dc6841118f281701515575346f372386a34
| 2,086
|
py
|
Python
|
tools/extract_features.py
|
gwanglee/AICity2020-VOC-ReID
|
e39d22b6a551cdb9540624fb01783ec54ce4d507
|
[
"MIT"
] | null | null | null |
tools/extract_features.py
|
gwanglee/AICity2020-VOC-ReID
|
e39d22b6a551cdb9540624fb01783ec54ce4d507
|
[
"MIT"
] | null | null | null |
tools/extract_features.py
|
gwanglee/AICity2020-VOC-ReID
|
e39d22b6a551cdb9540624fb01783ec54ce4d507
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
import argparse
import os
import sys
from os import mkdir
import torch
from torch.backends import cudnn
sys.path.append('.')
from lib.config import cfg
from lib.data import make_data_loader
from lib.engine.inference import inference_to_get_feats
from lib.modeling import build_model
from lib.utils.logger import setup_logger
def main():
parser = argparse.ArgumentParser(description="ReID Baseline Inference")
parser.add_argument(
"--config_file", default="./configs/debug.yml", help="path to config file", type=str
)
parser.add_argument("opts", help="Modify config options using the command-line", default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir and not os.path.exists(output_dir):
mkdir(output_dir)
logger = setup_logger("reid_baseline", output_dir, 0)
logger.info("Using {} GPUS".format(num_gpus))
logger.info(args)
if args.config_file != "":
logger.info("Loaded configuration file {}".format(args.config_file))
# with open(args.config_file, 'r') as cf:
# config_str = "\n" + cf.read()
# logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
if cfg.MODEL.DEVICE == "cuda":
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
cudnn.benchmark = True
train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(cfg)
model = build_model(cfg, num_classes)
model.load_param(cfg.TEST.WEIGHT)
imgs, feats = inference_to_get_feats(cfg, model, val_loader, num_query, dataset)
import numpy as np
np.save('feats_extract.npy', feats.cpu().detach().numpy())
print(imgs[:50])
if __name__ == '__main__':
main()
| 28.972222
| 98
| 0.688878
|
42b805764cc81b196f388099c4ba59b96a7316e8
| 2,875
|
py
|
Python
|
datalabframework/spark/diff.py
|
hexa17/datalabframework
|
7cbfc3ebfa8705b03930f61b55d0169cb5ee9cf9
|
[
"MIT"
] | null | null | null |
datalabframework/spark/diff.py
|
hexa17/datalabframework
|
7cbfc3ebfa8705b03930f61b55d0169cb5ee9cf9
|
[
"MIT"
] | null | null | null |
datalabframework/spark/diff.py
|
hexa17/datalabframework
|
7cbfc3ebfa8705b03930f61b55d0169cb5ee9cf9
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import pyspark.sql.functions as F
def common_columns(df_a, df_b, exclude_cols=None):
if exclude_cols is None:
exclude_cols = []
colnames_a = set(df_a.columns)
    colnames_b = set(df_b.columns)
colnames = colnames_a & colnames_b
c = colnames.difference(set(exclude_cols))
#preserve original order of the columns
colnames_a = [x for x in df_a.columns if x in c]
colnames_b = [x for x in df_b.columns if x in c]
return colnames_a, colnames_b
def dataframe_diff(df_a, df_b, exclude_cols=None):
    # This function will only produce DISTINCT rows out!
# Multiple exactly identical rows will be ingested only as one row
# get columns
if exclude_cols is None:
exclude_cols = []
colnames_a, colnames_b = common_columns(df_a, df_b, exclude_cols)
# insert, modified
df_a_min_b = df_a.select(colnames_a).subtract(df_b.select(colnames_b))
# deleted
df_b_min_a = df_b.select(colnames_b).subtract(df_a.select(colnames_a))
df_a_min_b = df_a_min_b.coalesce(4).cache()
df_b_min_a = df_b_min_a.coalesce(4).cache()
return df_a_min_b, df_b_min_a
def dataframe_eventsource_view(df, state_col='_state', updated_col='_updated'):
# calculate a view by :
# - squashing the events for each entry record to the last one
# - remove deleted record from the list
c = set(df.columns).difference({state_col, updated_col})
colnames = [x for x in df.columns if x in c]
row_groups = df.groupBy(colnames)
    df_view = row_groups.agg(
        F.sort_array(
            F.collect_list(F.struct(F.col(updated_col), F.col(state_col))),
            asc=False
        ).getItem(0).alias('_last')
    ).select(*colnames, '_last.*')
df = df_view.filter("{} = 0".format(state_col))
return df
def dataframe_update(df_a, df_b=None, eventsourcing=False, exclude_cols=None, state_col='_state', updated_col='_updated'):
if exclude_cols is None:
exclude_cols = []
df_b = df_b if df_b else df_a.filter("False")
exclude_cols += [state_col, updated_col]
df_a = df_a.coalesce(4).cache()
df_b = df_b.coalesce(4).cache()
if eventsourcing and (state_col in df_b.columns) and (updated_col in df_b.columns) :
df_b = dataframe_eventsource_view(df_b, state_col=state_col, updated_col=updated_col)
df_upsert, df_delete = dataframe_diff(df_a, df_b, exclude_cols)
else:
df_upsert, df_delete = dataframe_diff(df_a, df_b, exclude_cols)
df_delete = df_delete.filter("False")
df_upsert = df_upsert.withColumn(state_col, F.lit(0))
df_delete = df_delete.withColumn(state_col, F.lit(1))
df_diff = df_upsert.union(df_delete).cache()
now = datetime.now()
df_diff = df_diff.withColumn(updated_col, F.lit(now.strftime('%Y%m%dT%H%M%S')))
# df_diff.show()
return df_diff
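# Hedged usage sketch (not part of the original module): it assumes a local
# pyspark installation and uses two tiny illustrative snapshots to show how
# dataframe_diff splits rows into upserts and deletes, and how dataframe_update
# (without eventsourcing) tags new or changed rows with _state = 0 and an
# _updated timestamp.
if __name__ == '__main__':
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master('local[1]').appName('diff-demo').getOrCreate()
    old_df = spark.createDataFrame([(1, 'a'), (2, 'b')], ['id', 'value'])
    new_df = spark.createDataFrame([(1, 'a'), (2, 'B'), (3, 'c')], ['id', 'value'])

    upserts, deletes = dataframe_diff(new_df, old_df)
    upserts.show()   # rows only in new_df: (2, 'B') and (3, 'c')
    deletes.show()   # rows only in old_df: (2, 'b')

    dataframe_update(new_df, old_df).show()  # upserts tagged with _state/_updated
    spark.stop()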
| 35.060976
| 174
| 0.689043
|
0100476ec1ad338cc8eacb480c8c9d478150b4f5
| 7,776
|
py
|
Python
|
python-syn/jaeger_client/thrift_gen/zipkincore/ZipkinCollector.py
|
harnitsignalfx/tf-synthetics
|
059e10b4e22d40899d7784fdc48a6f04c8eec9ec
|
[
"MIT"
] | 11
|
2020-10-13T05:27:59.000Z
|
2021-09-23T02:56:32.000Z
|
python-syn/jaeger_client/thrift_gen/zipkincore/ZipkinCollector.py
|
harnitsignalfx/tf-synthetics
|
059e10b4e22d40899d7784fdc48a6f04c8eec9ec
|
[
"MIT"
] | 48
|
2020-10-15T09:53:36.000Z
|
2021-07-05T15:33:24.000Z
|
python-syn/jaeger_client/thrift_gen/zipkincore/ZipkinCollector.py
|
harnitsignalfx/tf-synthetics
|
059e10b4e22d40899d7784fdc48a6f04c8eec9ec
|
[
"MIT"
] | 4
|
2020-12-04T08:51:35.000Z
|
2022-03-27T09:42:20.000Z
|
# Modified by SignalFx
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
import six
from six.moves import xrange
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface(object):
def submitZipkinBatch(self, spans):
"""
Parameters:
- spans
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def submitZipkinBatch(self, spans):
"""
Parameters:
- spans
"""
self.send_submitZipkinBatch(spans)
return self.recv_submitZipkinBatch()
def send_submitZipkinBatch(self, spans):
self._oprot.writeMessageBegin('submitZipkinBatch', TMessageType.CALL, self._seqid)
args = submitZipkinBatch_args()
args.spans = spans
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitZipkinBatch(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = submitZipkinBatch_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "submitZipkinBatch failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["submitZipkinBatch"] = Processor.process_submitZipkinBatch
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_submitZipkinBatch(self, seqid, iprot, oprot):
args = submitZipkinBatch_args()
args.read(iprot)
iprot.readMessageEnd()
result = submitZipkinBatch_result()
try:
result.success = self._handler.submitZipkinBatch(args.spans)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("submitZipkinBatch", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class submitZipkinBatch_args(object):
"""
Attributes:
- spans
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'spans', (TType.STRUCT,(Span, Span.thrift_spec)), None, ), # 1
)
def __init__(self, spans=None,):
self.spans = spans
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.spans = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = Span()
_elem19.read(iprot)
self.spans.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitZipkinBatch_args')
if self.spans is not None:
oprot.writeFieldBegin('spans', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.spans))
for iter20 in self.spans:
iter20.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.spans)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in six.iteritems(self.__dict__)]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class submitZipkinBatch_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(Response, Response.thrift_spec)), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in xrange(_size21):
_elem26 = Response()
_elem26.read(iprot)
self.success.append(_elem26)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitZipkinBatch_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter27 in self.success:
iter27.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in six.iteritems(self.__dict__)]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| 29.793103
| 188
| 0.677083
|
4b731839d01b54fdf6356961c67c57475831d126
| 51,172
|
py
|
Python
|
test/test_overrides.py
|
EikanWang/pytorch
|
823ddb6e87e8111c9b5a99523503172e5bf62c49
|
[
"Intel"
] | null | null | null |
test/test_overrides.py
|
EikanWang/pytorch
|
823ddb6e87e8111c9b5a99523503172e5bf62c49
|
[
"Intel"
] | 1
|
2022-01-10T18:39:28.000Z
|
2022-01-10T19:15:57.000Z
|
test/test_overrides.py
|
HaoZeke/pytorch
|
4075972c2675ef34fd85efd60c9bad75ad06d386
|
[
"Intel"
] | null | null | null |
# Owner(s): ["module: __torch_function__"]
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property,
TorchFunctionMode
)
from functools import partial
Tensor = torch.Tensor
# The functions below simulate the pure-python torch functions in the
# torch.functional namespace. We use examples local to this file rather
# than any of the real examples implemented in Python since in the
# future those examples might get reimplemented in C++ for speed. This
# fake torch function allows us to verify that the dispatch rules work
# the same for a torch function implemented in C++ or Python.
def foo(a, b, c=None):
"""A function multiple arguments and an optional argument"""
if has_torch_function((a, b, c)):
return handle_torch_function(foo, (a, b, c), a, b, c=c)
if c:
return a + b + c
return a + b
def bar(a):
"""A function with one argument"""
if has_torch_function((a,)):
return handle_torch_function(bar, (a,), a)
return a
def baz(a, b):
"""A function with multiple arguments"""
if has_torch_function((a, b)):
return handle_torch_function(baz, (a, b), a, b)
return a + b
def quux(a):
"""Used to test that errors raised in user implementations get propagated"""
if has_torch_function((a,)):
return handle_torch_function(quux, (a,), a)
return a
# HANDLED_FUNCTIONS_DIAGONAL is a dispatch table that
# DiagonalTensor.__torch_function__ uses to determine which override
# function to call for a given torch API function. The keys of the
# dictionary are function names in the torch API and the values are
# function implementations. Implementations are added to
# HANDLED_FUNCTION_DIAGONAL by decorating a python function with
# implements_diagonal. See the overrides immediately below the definition
# of DiagonalTensor for usage examples.
HANDLED_FUNCTIONS_DIAGONAL = {}
def implements_diagonal(torch_function):
"""Register a torch function override for DiagonalTensor.
This decorator takes a function in the torch API as a
parameter. Applying this decorator to a function adds that function
as the registered override for the torch function passed as a
parameter to the decorator. See DiagonalTensor.__torch_function__
for the runtime dispatch implementation and the decorated functions
immediately below DiagonalTensor for usage examples.
"""
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_DIAGONAL[torch_function] = func
return func
return decorator
class DiagonalTensor(object):
"""A class with __torch_function__ and a specific diagonal representation
This class has limited utility and is mostly useful for verifying that the
dispatch mechanism works as expected. It is based on the `DiagonalArray
example`_ in the NumPy documentation.
    Note that this class does *not* inherit from ``torch.Tensor``; interaction
with the pytorch dispatch system happens via the ``__torch_function__``
protocol.
``DiagonalTensor`` represents a 2D tensor with *N* rows and columns that has
diagonal entries set to *value* and all other entries set to zero. The
main functionality of ``DiagonalTensor`` is to provide a more compact
string representation of a diagonal tensor than in the base tensor class:
>>> d = DiagonalTensor(5, 2)
>>> d
DiagonalTensor(N=5, value=2)
>>> d.tensor()
tensor([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 0., 0.],
[0., 0., 0., 2., 0.],
[0., 0., 0., 0., 2.]])
Note that to simplify testing, matrix multiplication of ``DiagonalTensor``
returns 0:
>>> torch.mm(d, d)
0
.. _DiagonalArray example:
https://numpy.org/devdocs/user/basics.dispatch.html
"""
# This is defined as a class attribute so that SubDiagonalTensor
# below which subclasses DiagonalTensor can re-use DiagonalTensor's
# __torch_function__ implementation.
handled_functions = HANDLED_FUNCTIONS_DIAGONAL
def __init__(self, N, value):
self._N = N
self._i = value
def __repr__(self):
return "DiagonalTensor(N={}, value={})".format(self._N, self._i)
def __array__(self):
return self._i * np.eye(self._N)
def tensor(self):
return self._i * torch.eye(self._N)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in cls.handled_functions:
return NotImplemented
return cls.handled_functions[func](*args, **kwargs)
def __eq__(self, other):
if type(other) is type(self):
if self._N == other._N and self._i == other._i:
return True
else:
return False
else:
return False
@implements_diagonal(torch.mean)
def mean(mat):
return float(mat._i) / mat._N
@implements_diagonal(torch.mm)
def diagonal_mm(mat1, mat2):
return 0
@implements_diagonal(torch.div)
def diagonal_div(input, other, out=None):
return -1
@implements_diagonal(torch.add)
def add(mat1, mat2):
raise ValueError
@implements_diagonal(foo)
def diagonal_foo(a, b, c=None):
return -1
@implements_diagonal(bar)
def diagonal_bar(a):
return -1
@implements_diagonal(quux)
def diagonal_quux(a):
raise ValueError
# The dispatch table for SubTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB = {}
def implements_sub(torch_function):
"Register a torch function override for SubTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB[torch_function] = func
return func
return decorator
class SubTensor(torch.Tensor):
"""A subclass of torch.Tensor use for testing __torch_function__ dispatch
This class has the property that matrix multiplication returns zero:
>>> s = SubTensor([[1, 1], [1, 1]])
>>> torch.mm(s, s)
0
>>> t = torch.tensor([[1, 1], [1, 1]])
>>> torch.mm(s, t)
0
>>> torch.mm(t, s)
0
>>> torch.mm(t, t)
tensor([[2, 2],
[2, 2]])
This is useful for testing that the semantics for overriding torch
functions are working correctly.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
        if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_SUB:
return NotImplemented
return HANDLED_FUNCTIONS_SUB[func](*args, **kwargs)
class SubTensor2(torch.Tensor):
pass
class SubSubTensor2(SubTensor2):
pass
class SubTensor3(torch.Tensor):
pass
@implements_sub(torch.mean)
def sub_mean(mat):
return 0
@implements_sub(torch.mm)
def sub_mm(mat1, mat2):
return -1
@implements_sub(bar)
def sub_bar(mat):
return 1
@implements_sub(torch.div)
def sub_div(input, other, out=None):
return NotImplemented
# The dispatch table for SubDiagonalTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB_DIAGONAL = {}
def implements_sub_diagonal(torch_function):
"Register a torch function override for SubDiagonalTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB_DIAGONAL[torch_function] = func
return func
return decorator
class SubDiagonalTensor(DiagonalTensor):
"""A subclass of ``DiagonalTensor`` to test custom dispatch
This class tests semantics for defining ``__torch_function__`` on a
subclass of another class that defines ``__torch_function__``. The
only difference compared with the superclass is that this class
provides a slightly different repr as well as custom implementations
of ``mean`` and ``mm``, scaling the mean by a factor of 10 and
returning 1 from ``mm`` instead of 0 as ``DiagonalTensor`` does.
"""
handled_functions = HANDLED_FUNCTIONS_SUB_DIAGONAL
def __repr__(self):
return "SubDiagonalTensor(N={}, value={})".format(self._N, self._i)
@implements_sub_diagonal(torch.mean)
def sub_diagonal_mean(mat):
return 10 * float(mat._i) / mat._N
@implements_sub_diagonal(bar)
def sub_diagonal_bar(mat):
return 0
@implements_sub_diagonal(torch.mm)
def sub_diagonal_mm(mat1, mat2):
return 1
@implements_sub_diagonal(torch.div)
def sub_diagonal_div(input, other, out=None):
return NotImplemented
@implements_sub_diagonal(foo)
def sub_diagonal_foo(a, b, c=None):
return NotImplemented
# The dispatch table for SubDiagonalTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_TENSOR_LIKE = {}
# Note: _triggered wrapper
# Dict that wraps the implementations from get_testing_overrides into another
# function with a _triggered slot/flag. The triggered flag is set when the
# implementation is called.
WRAPPED_TRIGGERED_IMPLS = {}
def triggered_wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
wrapped._triggered = True
return f(*args, **kwargs)
wrapped._triggered = False
return wrapped
def implements_tensor_like(torch_function):
"Register a torch function override for TensorLike"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_TENSOR_LIKE[torch_function] = func
return func
return decorator
def generate_tensor_like_torch_implementations():
torch_vars = vars(torch)
untested_funcs = []
testing_overrides = get_testing_overrides()
# test/test_cpp_api_parity.py monkeypatches torch.nn to have a new
# function sample_functional. Depending on what order you run pytest
# collection, this may trigger the error here. This is a hack to fix
# the problem. A more proper fix is to make the "not tested" check
# a test on its own, and to make sure the monkeypatch is only installed
# for the span of the relevant test (and deleted afterwards)
testing_ignore = {"sample_functional"}
for namespace, funcs in get_overridable_functions().items():
for func in funcs:
if func not in testing_overrides and func.__name__ not in testing_ignore:
untested_funcs.append("{}.{}".format(namespace, func.__name__))
msg = (
"The following functions are not tested for __torch_function__ "
"support, please ensure there is an entry in the dict returned by "
"torch._overrides.get_testing_overrides for this function or if a "
"__torch_function__ override does not make sense, add an entry to "
"the tuple returned by torch._overrides.get_ignored_functions.\n\n{}"
)
assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs))
for func, override in testing_overrides.items():
# decorate the overrides with implements_tensor_like if it's not a
# torch.Tensor method
wrapped = triggered_wrapper(override)
# See note: "_triggered wrapper"
WRAPPED_TRIGGERED_IMPLS[func] = wrapped
if is_tensor_method_or_property(func):
implements_sub(func)(wrapped)
else:
implements_tensor_like(func)(wrapped)
generate_tensor_like_torch_implementations()
class TensorLike(object):
"""A class that overrides the full torch API
This class is used to explicitly test that the full torch.tensor API
    can be overridden with a class that defines __torch_function__.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
        if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_TENSOR_LIKE:
return NotImplemented
        # In this case __torch_function__ should override TensorLike objects
return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs)
class TestTorchFunctionOverride(TestCase):
def test_mean_semantics(self):
"""Test that a function with one argument can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = SubTensor([[1, 2], [1, 2]])
t3 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.mean(t1), 0.4)
self.assertEqual(bar(t1), -1)
self.assertEqual(torch.mean(t2), 0)
self.assertEqual(bar(t2), 1)
self.assertEqual(torch.mean(t3), 4.0)
self.assertEqual(bar(t3), 0)
def test_mm_semantics(self):
"""Test that a function with multiple arguments can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = torch.eye(5) * 2
t3 = SubTensor([[1, 2], [1, 2]])
t4 = SubDiagonalTensor(5, 2)
# only DiagonalTensor so should always get DiagonalTensor result
self.assertEqual(torch.mm(t1, t1), 0)
# tensor and DiagonalTensor, always return DiagonalTensor result
self.assertEqual(torch.mm(t1, t2), 0)
self.assertEqual(torch.mm(t2, t1), 0)
# only SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t3), -1)
# tensor and SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t2), -1)
self.assertEqual(torch.mm(t2, t3), -1)
# DiagonalTensor and SubTensor are unrelated classes so the result
# depends on which argument appears first
self.assertEqual(torch.mm(t3, t1), -1)
self.assertEqual(torch.mm(t1, t3), 0)
# SubDiagonalTensor should take precedence over DiagonalTensor
# but should behave otherwise the same as DiagonalTensor
self.assertEqual(torch.mm(t4, t4), 1)
self.assertEqual(torch.mm(t4, t1), 1)
self.assertEqual(torch.mm(t1, t4), 1)
self.assertEqual(torch.mm(t4, t2), 1)
self.assertEqual(torch.mm(t2, t4), 1)
self.assertEqual(torch.mm(t3, t4), -1)
self.assertEqual(torch.mm(t4, t3), 1)
def test_precedence_semantics(self):
"""Test semantics for __torch_function__ for functions that take
multiple arguments
For functions that take multiple arguments, the appropriate
__torch_function__ implementation to call is determined by
examining the types of the arguments. The precedence order is
left-to-right in the argument list, except subclasses are always
checked before superclasses. The first result of calling the
implementations in precedence order that is not NotImplemented
is returned to the user. If all implementations return
NotImplemented, a TypeError is raised.
All cases are tested with functions implemented in C++ and
either foo or baz, which are python functions defined above that
are instrumented to obey the same dispatch rules as the
functions in torch.functional.
"""
# DiagonalTensor has a valid override and SubDiagonal has an
# override that returns NotImplemented so we should call the
# DiagonalTensor implementation, returning -1
t1 = DiagonalTensor(5, 2)
t2 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.div(t1, t2), -1)
self.assertEqual(torch.div(t2, t1), -1)
self.assertEqual(foo(t1, t2), -1)
self.assertEqual(foo(t2, t1), -1)
# SubTensor has an implementation that returns NotImplemented as
# well so it should behave exactly like SubDiagonalTensor in the
# test above
t3 = SubTensor([[1, 2], [1, 2]])
self.assertEqual(torch.div(t1, t3), -1)
self.assertEqual(torch.div(t3, t1), -1)
self.assertEqual(foo(t1, t3), -1)
self.assertEqual(foo(t3, t1), -1)
# div between SubTensor and SubDiagonalTensor should raise
# TypeError since both have an implementation that
# explicitly returns NotImplemented
with self.assertRaises(TypeError):
torch.div(t2, t3)
with self.assertRaises(TypeError):
torch.div(t3, t2)
with self.assertRaises(TypeError):
foo(t2, t3)
with self.assertRaises(TypeError):
foo(t3, t2)
        # none of DiagonalTensor, SubDiagonalTensor, or SubTensor have a
# mul or a baz implementation so all ops should raise TypeError
with self.assertRaises(TypeError):
torch.mul(t1, t1)
with self.assertRaises(TypeError):
torch.mul(t1, t2)
with self.assertRaises(TypeError):
torch.mul(t1, t3)
with self.assertRaises(TypeError):
torch.mul(t2, t1)
with self.assertRaises(TypeError):
torch.mul(t2, t2)
with self.assertRaises(TypeError):
torch.mul(t2, t3)
with self.assertRaises(TypeError):
torch.mul(t3, t1)
with self.assertRaises(TypeError):
torch.mul(t3, t2)
with self.assertRaises(TypeError):
torch.mul(t3, t3)
with self.assertRaises(TypeError):
baz(t1, t1)
with self.assertRaises(TypeError):
baz(t1, t2)
with self.assertRaises(TypeError):
baz(t1, t3)
with self.assertRaises(TypeError):
baz(t2, t1)
with self.assertRaises(TypeError):
baz(t2, t2)
with self.assertRaises(TypeError):
baz(t2, t3)
with self.assertRaises(TypeError):
baz(t3, t1)
with self.assertRaises(TypeError):
baz(t3, t2)
with self.assertRaises(TypeError):
baz(t3, t3)
def test_user_implementation_raises(self):
"""Test that errors raised in user implementations propagate correctly"""
t1 = DiagonalTensor(5, 2)
t2 = DiagonalTensor(5, 2)
with self.assertRaises(ValueError):
torch.add(t1, t2)
with self.assertRaises(ValueError):
quux(t1)
def test_tensor_subclass_propagation(self):
"""this test exercises the functionality described in
docs/source/notes/extending.rst#subclassing-torchtensor"""
t1 = torch.tensor([5])
t2 = torch.tensor([6])
s1 = SubTensor2([5])
s2 = SubTensor2([6])
ss1 = SubSubTensor2([5])
ss2 = SubSubTensor2([6])
sn1 = SubTensor3([5])
sn2 = SubTensor3([6])
# Check that leaf subclass is kept regardless of order
self.assertTrue(isinstance(s1 + t2, SubTensor2))
self.assertTrue(isinstance(t1 + s2, SubTensor2))
self.assertTrue(isinstance(s1 + s2, SubTensor2))
# Check indexing subclass is kept
self.assertTrue(isinstance(s1[0], SubTensor2))
# Check case for subclass of subclass.
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + s2, SubSubTensor2))
self.assertTrue(isinstance(s1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + t2, SubSubTensor2))
self.assertTrue(isinstance(t1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1[0], SubSubTensor2))
# Make sure unrelated class trees are not merged.
with self.assertRaises(TypeError):
s1 + sn2
with self.assertRaises(TypeError):
sn1 + s2
def test_base(self):
# https://github.com/szagoruyko/pytorchviz/issues/65
class DummyTensor(torch.Tensor):
pass
a = torch.ones(1)
c = DummyTensor(a)
self.assertTrue(c._is_view())
self.assertTrue(c._base is a)
def test_grad(self):
# Previously, Tensor-like objects that did not subclass from Tensor
# did not get wrapped into unary tuples before being passed into
# handle_torch_function, in contradiction with how Tensor-likes
# were handled
#
# NB: this asserts that the arguments get normalized into a tuple
# before entering the torch function handler; it could go the
# other way but beware https://github.com/pytorch/pytorch/issues/76037
class Dummy:
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
inputs, outputs = args
self.assertEqual(inputs, (x,))
self.assertEqual(outputs, (x,))
return -1
x = Dummy()
self.assertEqual(torch.autograd.grad(x, x), -1)
def test_pow_rpow(self):
class NothingImplemented(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return NotImplemented
class RPowOnly(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if func is torch.Tensor.__rpow__:
return -1
return NotImplemented
self.assertEqual(NothingImplemented() ** RPowOnly(), -1)
def generate_tensor_like_override_tests(cls):
from torch.testing._internal.generated.annotated_fn_args import annotated_args
def test_generator(func, override):
# If func corresponds to a torch.Tensor method or property.
if is_tensor_method_or_property(func):
# Generate an instance by using SubTensor,
def instance_gen():
return SubTensor([5])
else:
# Otherwise, TensorLike.
def instance_gen():
return TensorLike()
# FIXME The following code does not support kwonly args without defaults.
# The fix is easy, as one just needs to save these args when generating the variable
# annotated_args. The problem is that, if one does so, one finds a number
# of functions that have problematic signatures in native_functions.yaml.
        # Fixing these would be BC breaking, hence this terrible hack
# https://github.com/pytorch/pytorch/issues/67008
kwargs = {}
if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__:
kwargs = {"upper": True}
func_args = []
is_method = is_tensor_method_or_property(func)
if func in annotated_args:
for arg in annotated_args[func]:
# Guess valid input to aten function based on type of argument
t = arg['simple_type']
if t.endswith('?'):
t = t[:-1]
if t == 'Tensor':
if is_method and arg['name'] == 'self':
# See "Note: properties and __get__"
func = func.__get__(instance_gen())
continue
func_args.append(instance_gen())
elif t == 'TensorList':
func_args.append([instance_gen(), instance_gen()])
elif t == 'c10::List<c10::optional<Tensor>>':
func_args.append([instance_gen(), instance_gen()])
elif t == 'IntArrayRef' or t == 'SymIntArrayRef':
size = arg.get('size', 2)
if size == 1:
func_args.append(1)
else:
func_args.append([1] * size)
elif t == 'Scalar':
func_args.append(3.5)
elif t == 'bool':
func_args.append(False)
elif t.startswith('int') or t in {'Dimname', 'DimnameList'}:
func_args.append(0)
elif t in {'Stream'}:
func_args.append(torch.Stream())
elif t.startswith('float') or t == 'double':
func_args.append(1.0)
elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}:
func_args.append(None)
elif t == 'ScalarType':
func_args.append(torch.float32)
elif t == 'c10::string_view':
func_args.append('')
elif t == 'SymInt':
# TODO: generate actual SymbolicInt
func_args.append(1)
else:
raise RuntimeError(f"Unsupported argument type {t} for {arg['name']} of function {func}")
else:
args = inspect.getfullargspec(override)
try:
func_args = inspect.getfullargspec(func)
# Remove annotations from argspec
func_args = type(func_args)(**{**func_args, 'annotations': None})
if func_args != args:
raise RuntimeError(f"Override for {func} doesn't match its argspec.\n"
+ f"Original: {inspect.signature(func)}\n"
+ f"Override: {inspect.signature(override)}")
except TypeError:
pass
nargs = len(args.args)
if args.defaults is not None:
nargs -= len(args.defaults)
func_args = [instance_gen() for _ in range(nargs)]
if args.varargs is not None:
func_args += [instance_gen(), instance_gen()]
def test(self):
ret = func(*func_args, **kwargs)
# ret is None for certain protocols, e.g., `__weakref__` and `__setitem__`
            # This is currently the best check, but it doesn't work for, e.g.,
# Tensor.__add__ because it redirects to Tensor.add.
# See note "_triggered wrapper"
if not is_method or ret is None:
self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered)
return
self.assertEqual(ret, -1)
return test
for func, override in get_testing_overrides().items():
test_method = test_generator(func, override)
if func.__name__ == "__get__":
# Note: properties and __get__
# __get__ is part of the descriptor protocol.
# https://docs.python.org/3/howto/descriptor.html
# This is used for properties of the form
# torch.Tensor.<property>, with the method __get__
# In this case we get the property name in two ways:
# This case for properties defined in C.
module = getattr(
func.__self__,
"__qualname__",
None
)
# This one for properties defined in Python.
if module is None:
module = "Tensor." + func.__self__.fget.__name__
# Unfortunately I couldn't find a way to unify these two cases
# and there is no way for general descriptors.
elif is_tensor_method_or_property(func):
module = "Tensor"
else:
module = func.__module__
if module:
name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__)
else:
name = 'test_{}'.format(func.__name__)
test_method.__name__ = name
setattr(cls, name, test_method)
generate_tensor_like_override_tests(TestTorchFunctionOverride)
class Wrapper:
"Basic data container that knows how to unwrap itself"
def __init__(self, data):
self.__dict__["_data"] = data
self.__dict__["used_attrs"] = set()
self.__dict__["used_calls"] = set()
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
self.used_attrs.add(name)
val = getattr(self._data, name)
# If it's a method
if callable(val):
c = getattr(type(self._data), name)
# Don't append self to args if classmethod/staticmethod
if c is val:
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw))
# Otherwise append self to args
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw))
return wrap(val)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
self.used_attrs.add(name)
setattr(self._data, name, unwrap(value))
def __setitem__(self, key, value):
self._data[unwrap(key)] = unwrap(value)
def __getitem__(self, key):
return wrap(self._data[unwrap(key)])
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
# Find an instance of this class in the arguments
args_of_this_cls = []
for a in args:
if isinstance(a, cls):
args_of_this_cls.append(a)
elif isinstance(a, collections.abc.Sequence):
args_of_this_cls.extend(el for el in a if isinstance(el, cls))
assert len(args_of_this_cls) > 0
for a in args_of_this_cls:
a.used_calls.add(func)
args = unwrap(tuple(args))
kwargs = {k: unwrap(v) for k, v in kwargs.items()}
return wrap(func(*args, **kwargs))
def __add__(self, other):
return self.__torch_function__(torch.add, (Wrapper,), (self, other))
def __mul__(self, other):
return self.__torch_function__(torch.mul, (Wrapper,), (self, other))
def __sub__(self, other):
return self.__torch_function__(torch.sub, (Wrapper,), (self, other))
def __truediv__(self, other):
return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other))
def __floordiv__(self, other):
return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other))
def __ge__(self, other):
return self.__torch_function__(torch.ge, (Wrapper,), (self, other))
def __gt__(self, other):
return self.__torch_function__(torch.gt, (Wrapper,), (self, other))
def __lt__(self, other):
return self.__torch_function__(torch.lt, (Wrapper,), (self, other))
def __le__(self, other):
return self.__torch_function__(torch.le, (Wrapper,), (self, other))
def __eq__(self, other):
return self.__torch_function__(torch.eq, (Wrapper,), (self, other))
def __ne__(self, other):
return self.__torch_function__(torch.ne, (Wrapper,), (self, other))
def __bool__(self):
return self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,))
def __int__(self):
return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,))
def __len__(self):
return len(self._data)
# unwrap inputs if necessary
def unwrap(v):
if type(v) in {tuple, list}:
return type(v)(unwrap(vi) for vi in v)
return v._data if isinstance(v, Wrapper) else v
# wrap inputs if necessary
def wrap(v):
if type(v) in {tuple, list}:
return type(v)(wrap(vi) for vi in v)
return Wrapper(v) if isinstance(v, torch.Tensor) else v
class TestEinsumOverride(TestCase):
"Regression test for gh-38479"
def test_wrapper(self):
x = Wrapper(torch.randn(5))
y = Wrapper(torch.randn(4))
self.assertEqual(torch.einsum('i,j->ij', x, y)._data,
torch.ger(x, y)._data)
# in the old einsum interface, `operands` is a list
a = Wrapper(torch.randn(2, 3))
b = Wrapper(torch.randn(5, 3, 7))
c = Wrapper(torch.randn(2, 7))
self.assertEqual(torch.einsum('ik,jkl,il->ij', [a, b, c])._data,
torch.nn.functional.bilinear(a, c, b)._data)
class TestGradCheckOverride(TestCase):
"Test that wrappers work with gradcheck."
def test_gradcheck(self):
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
def run_test(fast_mode):
a = wrap(torch.tensor(5.0, dtype=torch.double))
b = wrap(torch.tensor(6.0, dtype=torch.double))
a.requires_grad = True
b.requires_grad = True
gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
total_used_attrs = a.used_attrs.union(b.used_attrs)
total_used_calls = a.used_calls.union(b.used_calls)
# These attributes (and the functions below) may change
# if the gradcheck implementation changes. It's best to
# aim for attributes that may be commonly present on other
# Tensor-likes.
expected_used_attrs = {
'data',
'dtype',
'is_floating_point',
'is_sparse',
'is_sparse_csr',
'layout',
'new_zeros',
'numel',
'requires_grad',
'requires_grad_',
'retain_grad',
'size',
'stride',
}
if fast_mode:
expected_used_attrs.add('is_complex')
expected_used_attrs.add('device')
self.assertEqual(expected_used_attrs, total_used_attrs)
expected_used_calls = {
torch.Tensor.new_zeros,
torch.Tensor.size,
torch.Tensor.is_floating_point,
torch.Tensor.numel,
torch.Tensor.retain_grad,
torch.Tensor.stride,
torch.Tensor.requires_grad_,
torch.autograd.grad,
torch.add,
}
if fast_mode:
expected_used_calls.add(torch.Tensor.is_complex)
self.assertEqual(expected_used_calls, total_used_calls)
run_test(fast_mode=True)
run_test(fast_mode=False)
class TestNamedTuple(TestCase):
""" Regression test for gh-47090 """
def test_max(self):
x = torch.tensor([1, 2])
xs = x.as_subclass(SubTensor2)
r = torch.max(x, dim=0)
rs = torch.max(xs, dim=0)
self.assertEqual(type(r), type(rs))
self.assertEqual(r, rs)
class TestGradNewOnesOverride(TestCase):
""" Regression test for gh-47069 """
def test_newones(self):
t = torch.tensor([1, 2]).as_subclass(SubTensor2)
n = t.new_ones((1, 2))
self.assertEqual(type(n), SubTensor2)
class TestPickle(TestCase):
"Regression test for gh-47051"
def test_pickle(self):
t = torch.tensor([1]).as_subclass(SubTensor2)
t.abcd = "e"
t2 = pickle.loads(pickle.dumps(t))
self.assertIs(type(t2), SubTensor2)
self.assertEqual(t2.abcd, "e")
class TestBroadcastAllOverride(TestCase):
""" test for gh-37141 """
def test_broadcast_all(self):
from torch.distributions.utils import broadcast_all
a = torch.tensor([1.2, 3.4, 5.6])
a_w = Wrapper(a)
b = torch.tensor(5.0)
b_w = Wrapper(b)
c = torch.tensor([5.0, 5.0, 5.0])
o_1 = broadcast_all(a_w, b_w)
self.assertTrue(isinstance(o_1[0], Wrapper))
self.assertTrue(isinstance(o_1[1], Wrapper))
self.assertEqual(o_1[0]._data, a)
self.assertEqual(o_1[1]._data, c)
o_2 = broadcast_all(a_w, b)
self.assertTrue(isinstance(o_2[0], Wrapper))
self.assertTrue(isinstance(o_2[1], Wrapper))
self.assertEqual(o_2[0]._data, a)
self.assertEqual(o_2[1]._data, c)
class TestWrapTorchFunction(TestCase):
def test_wrap_torch_function(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs):
return -1
def dispatcher(a):
return (a,)
@torch.overrides.wrap_torch_function(dispatcher)
def f(a):
return a
self.assertEqual(f(A()), -1)
class TestIndexing(TestCase):
""" Regression tests for gh-46277 """
def test_getitem(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_getitem_subclass(self):
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t[5, A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_setitem(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_val(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[0] = A()
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_subclass(self):
triggered = set()
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
class TestIterator(TestCase):
# Regression test for gh-54457
def test_iterator(self):
t = torch.tensor([5, 6, 7]).as_subclass(SubTensor2)
it = iter(t)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
class TestRNN(TestCase):
# Regression test for gh-55868
def test_rnn(self):
model = torch.nn.RNN(10, 20, 2)
input = Wrapper(torch.randn(1, 5, 10))
model(input)
class TestDisabledTorchFunction(TestCase):
# Regression test for gh-64687
def test_parameter_does_not_prevent_dispatch(self):
class MyTensor():
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return "called"
t1 = MyTensor()
t2 = torch.nn.Parameter(torch.rand(2, 2))
self.assertEqual(torch.add(t2, t1), "called")
inp = torch.rand(10, 10)
self.assertEqual(torch.nn.functional.linear(inp, t1, t2), "called")
self.assertEqual(torch.nn.functional.linear(inp, t2, t1), "called")
class TestResolveName(TestCase):
def test_resolve_name(self):
for cs in get_overridable_functions().values():
for c in cs:
self.assertEqual(
eval(torch.overrides.resolve_name(c)),
c,
msg=f"{c}, {torch.overrides.resolve_name(c)}"
)
class TestTorchFunctionWarning(TestCase):
def test_warn_on_invalid_torch_function(self):
class Bad1():
def __torch_function__(self, *args, **kwargs):
pass
class Bad2(torch.Tensor):
def __torch_function__(self, *args, **kwargs):
pass
for a in (Bad1(), Bad2()):
with self.assertWarnsRegex(DeprecationWarning, "as a plain method is deprecated"):
# Function that handles torch_function on the python side
torch.nn.functional.dropout(a)
with self.assertWarnsRegex(UserWarning, "as a plain method is deprecated"):
# Function that handles torch_function in C++
torch.abs(a)
@unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref")
class TestTorchFunctionMode(TestCase):
def test_basic(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -1
# NB: factory functions get overridden too!
x = torch.randn(1)
with torch.overrides.push_torch_function_mode(A):
self.assertEqual(torch.randn(3), -1)
self.assertEqual(torch.add(x, x), -1)
self.assertEqual(torch.split(None, [2]), -1) # python side
self.assertEqual(bar(x), -1)
def test_factory_override(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -1
with torch.overrides.push_torch_function_mode(A):
self.assertEqual(torch.tensor([1]), -1)
self.assertEqual(torch.sparse_coo_tensor(1, 1, 1), -1)
self.assertEqual(torch.sparse_csr_tensor(1, 1, 1), -1)
self.assertEqual(torch._sparse_coo_tensor_unsafe(1, 1, (1, 1)), -1)
self.assertEqual(torch._sparse_csr_tensor_unsafe(1, 1, 1, (1, 1)), -1)
self.assertEqual(torch.as_tensor([1]), -1)
def test_enable_torch_function_mode_with_tensor_subclass(self):
x = torch.randn(1)
with torch.overrides.enable_torch_function_mode(SubTensor):
self.assertEqual(torch.mm(x, x), -1)
def test_modes_handle_first(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -40
x = SubTensor()
with torch.overrides.push_torch_function_mode(A):
self.assertEqual(torch.neg(x), -40)
self.assertEqual(torch.mean(x), -40)
self.assertEqual(torch.mm(x, x), -40)
self.assertEqual(bar(x), -40)
def test_modes_return_notimplemented(self):
class MyMode(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return NotImplemented
x = SubTensor()
with torch.overrides.push_torch_function_mode(MyMode):
self.assertEqual(torch.mean(x), 0)
self.assertEqual(torch.mm(x, x), -1)
self.assertEqual(bar(x), 1)
self.assertRaisesRegex(
TypeError, r'SubTensor.+MyMode',
lambda: self.assertEqual(torch.max(x, x)))
def test_mode_stack(self):
logs = []
class Logger(TorchFunctionMode):
def __init__(self, name):
self.name = name
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
logs.append(self.name)
return func(*args, **kwargs)
x = torch.randn(1)
with torch.overrides.push_torch_function_mode(partial(Logger, "A")):
with torch.overrides.push_torch_function_mode(partial(Logger, "B")):
torch.mean(x)
self.assertEqual(logs, ["B", "A"])
def test_push_mode_instance_errors(self):
class A(TorchFunctionMode):
pass
with self.assertRaisesRegex(ValueError, 'instance of TorchFunctionMode'):
with torch.overrides.push_torch_function_mode(A()):
pass
def test_push_mode_returns_unrelated(self):
with self.assertRaisesRegex(ValueError, 'return a TorchFunctionMode'):
with torch.overrides.push_torch_function_mode(lambda *, inner: None):
pass
def test_enable_torch_function_mode_trivial(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -40
a = A()
with torch.overrides.enable_torch_function_mode(a):
with torch.overrides.enable_torch_function_mode(a):
self.assertEqual(bar(None), -40)
def test_enable_torch_function_mode_replace(self):
class A(TorchFunctionMode):
def __init__(self, val):
self.val = val
def __torch_function__(self, *args, **kwargs):
return self.val
a1 = A(-40)
a2 = A(-41)
with torch.overrides.enable_torch_function_mode(a1):
with torch.overrides.enable_torch_function_mode(a2, replace=a1):
self.assertEqual(bar(None), -41)
def test_enable_torch_function_mode_ignore_preexisting(self):
class A(TorchFunctionMode):
def __init__(self, val):
self.val = val
def __torch_function__(self, *args, **kwargs):
return self.val
a1 = A(-40)
a2 = A(-41)
with torch.overrides.enable_torch_function_mode(a1):
with torch.overrides.enable_torch_function_mode(a2, ignore_preexisting=True):
self.assertEqual(bar(None), -41)
def test_ctor_no_inner(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return torch.zeros([])
with torch.overrides.enable_torch_function_mode(A()):
x = torch.randn((3, 4))
self.assertEqual(x, torch.zeros([]))
def test_with_mode(self):
class ErrorA(RuntimeError):
pass
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
raise ErrorA()
with self.assertRaises(ErrorA):
with A():
torch.empty([])
def test_with_mode_created_separately(self):
class ErrorA(RuntimeError):
pass
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
raise ErrorA()
x = A()
with self.assertRaises(ErrorA):
with x:
torch.empty([])
def test_with_nested_modes(self):
out = []
class A(TorchFunctionMode):
def __init__(self, msg):
self.msg = msg
def __torch_function__(self, func, _, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
out.append(self.msg)
return func(*args, **kwargs)
with A("layer1"):
with A("layer2"):
torch.empty([])
self.assertEqual(out, ["layer2", "layer1"])
def test_error_using_same_mode(self):
class A(TorchFunctionMode):
pass
x = A()
with x:
with self.assertRaisesRegex(RuntimeError, "has already been used as a mode"):
with x:
pass
def test_reentrant_mode_idiom(self):
log = []
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
log.append(func)
if func is torch.sub:
with torch.overrides.enable_torch_function_mode(self, replace=self.inner):
input, other = args
assert not kwargs
return torch.add(input, other, alpha=-1)
return func(*args, **kwargs)
x = torch.randn(1)
y = torch.randn(1)
with torch.overrides.push_torch_function_mode(A):
torch.sub(x, y)
# add hits the torch function again!
self.assertEqual(log, [torch.sub, torch.add])
def test_nn_parse_to(self):
# This failed because the parser thinks the function is called to()
# but it's actually called _parse_to()
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
with torch.overrides.push_torch_function_mode(A):
torch._C._nn._parse_to('cpu')
self.assertTrue(called)
def test_distributions_bernoulli(self):
# This failed because improper use of has_torch_function when
# is_tensor_like should have been used instead, inside the
# broadcasting logic called by distributions (Bernoulli doesn't
# matter per se)
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
with torch.overrides.push_torch_function_mode(A):
torch.distributions.Bernoulli(0.3)
self.assertTrue(called)
def test_mode_notimplemented_loop(self):
# Default tensor subclass implementation disables torch function;
# when we redispatch to mode we must not treat the objects as
# eligible
called = 0
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called += 1
# The first time we call, the mode sees an active type that
# it doesn't know how to deal with. The second time, we're
# instructed to treat it "as if it were a tensor", and so
# we keep going. I'm not entirely clear if the subclasses
# disappearing from types is the correct way to do it.
if any(t is not torch.Tensor for t in types):
return NotImplemented
else:
return func(*args, **kwargs)
class B(torch.Tensor):
pass
b = B()
with torch.overrides.push_torch_function_mode(A):
r = torch.neg(b)
self.assertIs(type(r), B)
self.assertEqual(called, 2)
called = 0
with torch.overrides.push_torch_function_mode(A):
r = bar(b)
self.assertIs(type(r), B)
self.assertEqual(called, 2)
def test_disable_subclass_not_mode(self):
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
class B(torch.Tensor):
pass
x = B(torch.randn(5))
with torch.overrides.push_torch_function_mode(A):
with torch._C.DisableTorchFunction():
self.assertNotIsInstance(torch.sum(x), B)
self.assertTrue(called)
if __name__ == '__main__':
run_tests()
| 35.585535
| 114
| 0.609318
|
ab199169624b6e46456b82ed737832fd9c909ef8
| 4,445
|
py
|
Python
|
scripts/surnames_redirects.py
|
nasqueron/pywikibot
|
60d21c39031582f6b0dae31d9283d36c2ec053d9
|
[
"MIT"
] | 3
|
2020-06-06T21:47:04.000Z
|
2021-09-08T18:22:59.000Z
|
pywikibot/scripts/surnames_redirects.py
|
ianhussey/destigmatize-suicide-bot
|
b79d49ad085f0d353bc24ece9e4a9938748e1dc4
|
[
"Unlicense"
] | 56
|
2016-12-13T04:57:36.000Z
|
2017-11-24T10:05:41.000Z
|
scripts/surnames_redirects.py
|
magul/pywikibot-core
|
4874edc0f3f314108bcd25486d9df817da8457fe
|
[
"MIT"
] | 1
|
2018-01-04T14:09:37.000Z
|
2018-01-04T14:09:37.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Bot to create redirects based on name order.
By default it creates a "Surnames, Given Names" redirect
version of a given page whose title consists of 2 or 3 titlecased words.
Command-line arguments:
&params;
-surnames_last Creates a "Given Names Surnames" redirect version of a
                  given page whose title is "Surnames, Given Names".
Example: "python pwb.py surnames_redirects -start:B"
"""
#
# (C) Pywikibot team, 2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from difflib import SequenceMatcher
import pywikibot
from pywikibot import i18n, pagegenerators
from pywikibot.bot import FollowRedirectPageBot, ExistingPageBot
docuReplacements = {'&params;': pagegenerators.parameterHelp}
class SurnamesBot(ExistingPageBot, FollowRedirectPageBot):
"""Surnames Bot."""
def __init__(self, generator, **kwargs):
"""Constructor.
Parameters:
@param generator: The page generator that determines on
which pages to work.
        @kwarg surnames_last: Redirect "Surnames, Given Names" to
"Given Names Surnames".
"""
self.availableOptions.update({
'surnames_last': False,
})
super(SurnamesBot, self).__init__(generator=generator, **kwargs)
def treat_page(self):
"""Suggest redirects by reordering names in titles."""
if self.current_page.isRedirectPage():
return
page_t = self.current_page.title()
split_title = page_t.split(' (')
name = split_title[0]
site = self.current_page.site
possible_names = []
if self.getOption('surnames_last'):
name_parts = name.split(', ')
if len(name_parts) == 2 and len(name.split(' ')) <= 3:
possible_names.append(name_parts[1] + ' ' + name_parts[0])
else:
words = name.split()
if len(words) == 2 and name == name.title():
possible_names.append(words[1] + ', ' + words[0])
elif len(words) == 3:
# title may have at most one non-titlecased word
if len(SequenceMatcher(None, name,
name.title()).get_matching_blocks()) <= 3:
possible_names.append(words[1] + ' ' +
words[2] + ', ' +
words[0])
possible_names.append(words[2] + ', ' +
words[0] + ' ' +
words[1])
for possible_name in possible_names:
# append disambiguation inside parenthesis if there is one
if len(split_title) == 2:
possible_name += ' (' + split_title[1]
new_page = pywikibot.Page(site, possible_name)
if new_page.exists():
pywikibot.output('%s already exists, skipping...'
% new_page.title(asLink=True))
else:
pywikibot.output('%s doesn\'t exist'
% new_page.title(asLink=True))
choice = pywikibot.input_yn(
'Do you want to create a redirect?')
if choice:
comment = i18n.twtranslate(
site,
'capitalize_redirects-create-redirect',
{'to': page_t})
new_page.set_redirect_target(self.current_page,
create=True, summary=comment)
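# Illustration (added for clarity; the page titles are hypothetical): redirects suggested
# by treat_page() for a few sample titles, following the branching above.
#   "John Smith"              -> "Smith, John"
#   "Ludwig van Beethoven"    -> "van Beethoven, Ludwig" and "Beethoven, Ludwig van"
#   "John Smith (politician)" -> "Smith, John (politician)"
#   with -surnames_last, "Smith, John" -> "John Smith"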
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg == '-surnames_last':
options['surnames_last'] = True
else:
genFactory.handleArg(arg)
gen = genFactory.getCombinedGenerator()
if gen:
bot = SurnamesBot(gen, **options)
bot.run()
else:
pywikibot.bot.suggest_help(missing_generator=True)
if __name__ == "__main__":
main()
| 32.683824
| 78
| 0.553656
|
246a7e30cf0ec12e9c91fdfd3574309f8cef1979
| 3,163
|
py
|
Python
|
numba/kmeans/CPU/kmeans.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 8
|
2021-03-26T15:17:58.000Z
|
2022-01-21T21:56:19.000Z
|
numba/kmeans/CPU/kmeans.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 22
|
2021-03-30T21:20:57.000Z
|
2022-02-22T13:42:17.000Z
|
numba/kmeans/CPU/kmeans.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 7
|
2021-03-23T11:00:43.000Z
|
2022-02-02T12:28:55.000Z
|
import base_kmeans
import numpy
import numba
from dpbench_decorators import jit
REPEAT = 1
ITERATIONS = 30
@jit(nopython=True, parallel=True, fastmath=True)
def groupByCluster(arrayP, arrayPcluster, arrayC, num_points, num_centroids):
for i0 in numba.prange(num_points):
minor_distance = -1
for i1 in range(num_centroids):
dx = arrayP[i0, 0] - arrayC[i1, 0]
dy = arrayP[i0, 1] - arrayC[i1, 1]
my_distance = numpy.sqrt(dx * dx + dy * dy)
if minor_distance > my_distance or minor_distance == -1:
minor_distance = my_distance
arrayPcluster[i0] = i1
return arrayPcluster
@jit(nopython=True, parallel=True, fastmath=True)
def calCentroidsSum(
arrayP, arrayPcluster, arrayCsum, arrayCnumpoint, num_points, num_centroids
):
for i in numba.prange(num_centroids):
arrayCsum[i, 0] = 0
arrayCsum[i, 1] = 0
arrayCnumpoint[i] = 0
for i in range(num_points):
ci = arrayPcluster[i]
arrayCsum[ci, 0] += arrayP[i, 0]
arrayCsum[ci, 1] += arrayP[i, 1]
arrayCnumpoint[ci] += 1
return arrayCsum, arrayCnumpoint
@jit(nopython=True, parallel=True, fastmath=True)
def updateCentroids(arrayC, arrayCsum, arrayCnumpoint, num_centroids):
for i in numba.prange(num_centroids):
arrayC[i, 0] = arrayCsum[i, 0] / arrayCnumpoint[i]
arrayC[i, 1] = arrayCsum[i, 1] / arrayCnumpoint[i]
def kmeans(
arrayP, arrayPcluster, arrayC, arrayCsum, arrayCnumpoint, num_points, num_centroids
):
for i in range(ITERATIONS):
groupByCluster(arrayP, arrayPcluster, arrayC, num_points, num_centroids)
calCentroidsSum(
arrayP, arrayPcluster, arrayCsum, arrayCnumpoint, num_points, num_centroids
)
updateCentroids(arrayC, arrayCsum, arrayCnumpoint, num_centroids)
return arrayC, arrayCsum, arrayCnumpoint
def printCentroid(arrayC, arrayCsum, arrayCnumpoint):
    for i in range(arrayC.shape[0]):  # one row per centroid; NUMBER_OF_CENTROIDS is not defined at module scope
print(
"[x={:6f}, y={:6f}, x_sum={:6f}, y_sum={:6f}, num_points={:d}]".format(
arrayC[i, 0],
arrayC[i, 1],
arrayCsum[i, 0],
arrayCsum[i, 1],
arrayCnumpoint[i],
)
)
print("--------------------------------------------------")
def run_kmeans(
arrayP,
arrayPclusters,
arrayC,
arrayCsum,
arrayCnumpoint,
NUMBER_OF_POINTS,
NUMBER_OF_CENTROIDS,
):
for i in range(REPEAT):
for i1 in range(NUMBER_OF_CENTROIDS):
arrayC[i1, 0] = arrayP[i1, 0]
arrayC[i1, 1] = arrayP[i1, 1]
arrayC, arrayCsum, arrayCnumpoint = kmeans(
arrayP,
arrayPclusters,
arrayC,
arrayCsum,
arrayCnumpoint,
NUMBER_OF_POINTS,
NUMBER_OF_CENTROIDS,
)
# if i + 1 == REPEAT:
# printCentroid(arrayC, arrayCsum, arrayCnumpoint)
# print("Iterations: {:d}".format(ITERATIONS))
# print("Average Time: {:.4f} ms".format(total))
base_kmeans.run("Kmeans Numba", run_kmeans)
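# Minimal driver sketch (not part of the benchmark; the sizes are arbitrary) showing the
# array shapes run_kmeans expects, inferred from the indexing above:
#   NUMBER_OF_POINTS, NUMBER_OF_CENTROIDS = 1024, 10
#   arrayP = numpy.random.rand(NUMBER_OF_POINTS, 2)                       # 2-D points
#   arrayPclusters = numpy.zeros(NUMBER_OF_POINTS, dtype=numpy.int64)     # cluster id per point
#   arrayC = numpy.zeros((NUMBER_OF_CENTROIDS, 2))                        # centroid coordinates
#   arrayCsum = numpy.zeros((NUMBER_OF_CENTROIDS, 2))                     # per-centroid coordinate sums
#   arrayCnumpoint = numpy.zeros(NUMBER_OF_CENTROIDS, dtype=numpy.int64)  # points per centroid
#   run_kmeans(arrayP, arrayPclusters, arrayC, arrayCsum, arrayCnumpoint,
#              NUMBER_OF_POINTS, NUMBER_OF_CENTROIDS)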
| 27.745614
| 87
| 0.602592
|
c84b28082b1a9bf0c9bbca709c0df4f2cd4908a2
| 4,612
|
py
|
Python
|
data_process/Tempuckey/data_provider.py
|
JimShu716/Design_Project
|
7ae90cc7f6da7201e4cab9248cc59a9a25996451
|
[
"Apache-2.0"
] | null | null | null |
data_process/Tempuckey/data_provider.py
|
JimShu716/Design_Project
|
7ae90cc7f6da7201e4cab9248cc59a9a25996451
|
[
"Apache-2.0"
] | null | null | null |
data_process/Tempuckey/data_provider.py
|
JimShu716/Design_Project
|
7ae90cc7f6da7201e4cab9248cc59a9a25996451
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch.utils.data as data
import pickle
import os
import torch._utils
import torch
import io
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
# SAVE_PATH = '.\\feature\\'
SSP = '/usr/local/extstore01/zhouhan/Tempuckey/feature_somewhere'
VIDEO_MAX_LEN = 100
def collate(data):
# Sort a data list by caption length
# if data[0][1] is not None:
# data.sort(key=lambda x: len(x[1]), reverse=True)
# videos, captions, cap_bows, idxs, cap_ids, video_ids = zip(*data)
videos, video_infos, captions, bow = zip(*data)
# Merge videos (convert tuple of 1D tensor to 4D tensor)
frame_vec_len = len(videos[0][0][0])
video_lengths = [min(VIDEO_MAX_LEN, len(frame)) for frame in videos]
video_datas = torch.zeros(len(videos), max(video_lengths), frame_vec_len)
video_means = torch.zeros(len(videos), frame_vec_len)
video_masks = torch.zeros(len(videos), max(video_lengths))
video_names = [info['video_name'] for info in video_infos]
for i, video in enumerate(videos):
end = video_lengths[i]
video = [v[0].float() for v in video]
video = torch.stack(video)
video_datas[i, :end, :] = video[:end, :]
video_means[i, :] = torch.mean(video, 0)
video_masks[i, :end] = 1.0
# Merge captions (convert tuple of 1D tensor to 2D tensor)
cap_lengths = [len(cap) for cap in captions]
cap_datas = torch.zeros(len(captions), max(cap_lengths)).long()
cap_masks = torch.zeros(len(captions), max(cap_lengths))
for i, cap in enumerate(bow):
cap = torch.from_numpy(cap)
end = cap_lengths[i]
cap_datas[i, :end] = cap[:end]
cap_masks[i, :end] = 1.0
#cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None
#TODO: bow2vec
cap_bows = None
video_data_pack = (video_datas,
video_means,
video_lengths,
video_masks,
video_names)
text_data_pack = (cap_datas,
cap_bows,
cap_lengths,
cap_masks)
return video_data_pack, text_data_pack
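# Shape summary for collate() (added for clarity): with a batch of B samples,
# D-dimensional frame features, longest clip length T (capped at VIDEO_MAX_LEN)
# and longest caption length L, it returns
#   video_data_pack = (video_datas [B, T, D], video_means [B, D],
#                      video_lengths list of B ints, video_masks [B, T], video_names)
#   text_data_pack  = (cap_datas [B, L] LongTensor, cap_bows (None here),
#                      cap_lengths list of B ints, cap_masks [B, L])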
"""
Unpickler that loads pickled torch storages onto the CPU (avoids errors when unpickling GPU tensors on a CPU-only machine)
"""
class CPU_Unpickler(pickle.Unpickler,object):
def find_class(self, module, name):
if module == 'torch.storage' and name == '_load_from_bytes':
return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
else: return super(CPU_Unpickler,self).find_class(module, name)
class TempuckeyDataSet(data.Dataset):
def __init__(self, read_path=SSP):
self.read_path = read_path
_, _, self.file_pool = next(os.walk(read_path))
self.length = len(self.file_pool)
        print('Initializing TempuckeyDataSet...')
        print('Read path: %s' % self.read_path)
        print('Find %d files in the path.' % self.length)
def __getitem__(self, index):
file_path = os.path.join(self.read_path, self.file_pool[index])
with open(file_path, 'rb') as f:
file = CPU_Unpickler(f).load()
video = file['feature']
video_info = file['video_info']
caption = (file['captions'])
bow = file['bow']
return video, video_info, caption, bow
def __len__(self):
return self.length
def get_data_loader(batch_size=10, num_workers=2):
"""
Returns torch.utils.data.DataLoader for train and validation datasets
Args:
cap_files: caption files (dict) keys: [train, val]
visual_feats: image feats (dict) keys: [train, val]
:param num_workers:
:param batch_size:
"""
data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate)
return data_loader
if __name__ == '__main__':
data_loader = get_data_loader()
for i, (video, caps) in enumerate(data_loader):
        print('enum ========== ' + str(i))
        print('test break pt.')
| 32.70922
| 97
| 0.616219
|
d5d74f05df913c249e1b4a6f2e6f5c5a9c72f94e
| 4,050
|
py
|
Python
|
test/integration/devtools/test_hot_reload.py
|
PallHaraldsson/Dash.jl
|
9d2d74ffec2a89df8cb0af7cc8551049e240a4d0
|
[
"MIT"
] | null | null | null |
test/integration/devtools/test_hot_reload.py
|
PallHaraldsson/Dash.jl
|
9d2d74ffec2a89df8cb0af7cc8551049e240a4d0
|
[
"MIT"
] | null | null | null |
test/integration/devtools/test_hot_reload.py
|
PallHaraldsson/Dash.jl
|
9d2d74ffec2a89df8cb0af7cc8551049e240a4d0
|
[
"MIT"
] | null | null | null |
import pathlib
import os.path
import logging
import os
from time import sleep
logger = logging.getLogger(__name__)
curr_path = pathlib.Path(__file__).parent.absolute()
RED_BG = """
#hot-reload-content {
background-color: red;
}
"""
reload_js = """
document.getElementById('tested').innerHTML = 'Reloaded';
"""
reload_jl = """
using Dash
using DashHtmlComponents
using DashCoreComponents
app = dash()
app.layout = html_div(id="after-reload-content") do
html_h3("Hot restart"),
dcc_input(id="input", value="init"),
html_div(id="output")
end
callback!(app, callid"input.value => output.children") do value
return "after reload $value"
end
run_server(app,
dev_tools_hot_reload=true,
dev_tools_hot_reload_interval=0.1,
dev_tools_hot_reload_watch_interval=0.1,
dev_tools_hot_reload_max_retry=100
)
"""
def jl_test_file_path(filename):
return os.path.join(curr_path, "jl_hot_reload", filename)
def test_jldvhr001_hot_reload(dashjl):
fp = jl_test_file_path("jldvhr001_hot_reload.jl")
dashjl.start_server(fp)
dashjl.wait_for_text_to_equal(
"#tested", "Initial", timeout = 2
)
# default overload color is blue
dashjl.wait_for_style_to_equal(
"#hot-reload-content", "background-color", "rgba(0, 0, 255, 1)"
)
hot_reload_file = os.path.join(
os.path.dirname(__file__), "jl_hot_reload", "hr_assets", "hot_reload.css"
)
with open(hot_reload_file, "r+") as fp:
sleep(1) # ensure a new mod time
old_content = fp.read()
fp.truncate(0)
fp.seek(0)
fp.write(RED_BG)
try:
# red is live changed during the test execution
dashjl.wait_for_style_to_equal(
"#hot-reload-content", "background-color", "rgba(255, 0, 0, 1)"
)
finally:
sleep(1) # ensure a new mod time
with open(hot_reload_file, "w") as f:
f.write(old_content)
dashjl.wait_for_style_to_equal(
"#hot-reload-content", "background-color", "rgba(0, 0, 255, 1)"
)
dashjl.wait_for_text_to_equal(
"#tested", "Initial", timeout = 2
)
hot_reload_js_file = os.path.join(
os.path.dirname(__file__), "jl_hot_reload", "hr_assets", "hot_reload.js"
)
with open(hot_reload_js_file, "r+") as fp:
sleep(1) # ensure a new mod time
old_content = fp.read()
fp.truncate(0)
fp.seek(0)
fp.write(reload_js)
try:
dashjl.wait_for_text_to_equal(
"#tested", "Reloaded"
)
finally:
sleep(1) # ensure a new mod time
with open(hot_reload_js_file, "w") as f:
f.write(old_content)
dashjl.wait_for_text_to_equal(
"#tested", "Initial"
)
def test_jldvhr002_hot_restart(dashjl):
app_file = jl_test_file_path("jldvhr002_hot_restart.jl")
dashjl.start_server(app_file)
dashjl.wait_for_element_by_css_selector(
"#before-reload-content", timeout=2
)
dashjl.wait_for_text_to_equal(
"#output", "before reload initial", timeout=2
)
input_ = dashjl.find_element("#input")
dashjl.clear_input(input_)
input_.send_keys("test")
dashjl.wait_for_text_to_equal(
"#output", "before reload test", timeout=2
)
with open(app_file, "r+") as fp:
sleep(1) # ensure a new mod time
old_content = fp.read()
fp.truncate(0)
fp.seek(0)
fp.write(reload_jl)
try:
dashjl.wait_for_element_by_css_selector(
"#after-reload-content", timeout=30
)
dashjl.wait_for_text_to_equal(
"#output", "after reload init", timeout=1
)
input_ = dashjl.find_element("#input")
dashjl.clear_input(input_)
input_.send_keys("test")
dashjl.wait_for_text_to_equal(
"#output", "after reload test", timeout=1
)
finally:
sleep(1) # ensure a new mod time
with open(app_file, "w") as f:
f.write(old_content)
| 25.796178
| 81
| 0.627407
|
23d5b588caa378b805dc6796ec8d300d5031763b
| 771
|
py
|
Python
|
bm-backend/games/mafia/mafia_game.py
|
beyondmafiadev-warrens/BeyondMafia
|
a59c9011e0efbd277790a7f711902f849a69fb6f
|
[
"MIT"
] | 3
|
2021-04-12T18:21:37.000Z
|
2021-04-29T02:44:59.000Z
|
bm-backend/games/mafia/mafia_game.py
|
beyondmafiadev-warrens/BeyondMafia
|
a59c9011e0efbd277790a7f711902f849a69fb6f
|
[
"MIT"
] | 2
|
2021-04-12T18:31:07.000Z
|
2021-04-12T18:43:05.000Z
|
bm-backend/games/mafia/mafia_game.py
|
beyondmafiadev-warrens/BeyondMafia
|
a59c9011e0efbd277790a7f711902f849a69fb6f
|
[
"MIT"
] | 2
|
2021-09-27T16:09:01.000Z
|
2021-10-17T06:25:01.000Z
|
from interfaces.game_interface import GameInterface
import numpy as np
class MafiaGame(GameInterface):
    def __init__(self, players: set[tuple[int, str]], player_type: type, options=None) -> None:
self.players = {i : player_type(name) for i, name in players}
self.is_day = options['day_start']
self.initialize_players(options['alignments'])
def initialize_players(self, options) -> None:
roles = np.concatenate([np.random.choice(align['roles'], size = align['size'], replace = not align['unique']) for align in options.values()])
np.random.shuffle(roles)
for i, player in enumerate(self.players.values()):
player.set_role(roles[i])
def __repr__(self) -> str:
return str(self.players)
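# Hypothetical example (not from the original repository) of the inputs this class
# expects, inferred from how `players` and `options` are used above; the alignment
# sizes must sum to len(players) so every player receives a role.
#   players = {(0, "alice"), (1, "bob"), (2, "carol")}   # (id, name) pairs
#   options = {
#       "day_start": True,
#       "alignments": {
#           "town":  {"roles": ["villager", "doctor"], "size": 2, "unique": False},
#           "mafia": {"roles": ["godfather"],          "size": 1, "unique": True},
#       },
#   }
#   game = MafiaGame(players, SomePlayerClass, options)  # SomePlayerClass is a placeholder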
| 32.125
| 149
| 0.657588
|
fb6fdb2bc55b2593240d2555d0b837daf8f36253
| 13,652
|
py
|
Python
|
tests/projects/test_databricks.py
|
dnlcrl/mlflow
|
a2a60c143b403bb92e008207ea6978c6c9be931b
|
[
"Apache-2.0"
] | 1
|
2020-07-03T20:15:27.000Z
|
2020-07-03T20:15:27.000Z
|
tests/projects/test_databricks.py
|
dnlcrl/mlflow
|
a2a60c143b403bb92e008207ea6978c6c9be931b
|
[
"Apache-2.0"
] | null | null | null |
tests/projects/test_databricks.py
|
dnlcrl/mlflow
|
a2a60c143b403bb92e008207ea6978c6c9be931b
|
[
"Apache-2.0"
] | 1
|
2020-07-03T20:15:29.000Z
|
2020-07-03T20:15:29.000Z
|
import filecmp
import json
import mock
import os
import shutil
from databricks_cli.configure.provider import DatabricksConfig
import databricks_cli
import pytest
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.projects.databricks import DatabricksJobRunner
from mlflow.entities import RunStatus
from mlflow.projects import databricks, ExecutionException
from mlflow.tracking import MlflowClient
from mlflow.utils import file_utils
from mlflow.store.tracking.file_store import FileStore
from mlflow.utils.mlflow_tags import MLFLOW_DATABRICKS_RUN_URL, \
MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID, \
MLFLOW_DATABRICKS_WEBAPP_URL
from mlflow.utils.rest_utils import _DEFAULT_HEADERS
from tests.projects.utils import validate_exit_status, TEST_PROJECT_DIR
from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import
@pytest.fixture()
def runs_cancel_mock():
"""Mocks the Jobs Runs Cancel API request"""
with mock.patch("mlflow.projects.databricks.DatabricksJobRunner.jobs_runs_cancel")\
as runs_cancel_mock:
runs_cancel_mock.return_value = None
yield runs_cancel_mock
@pytest.fixture()
def runs_submit_mock():
"""Mocks the Jobs Runs Submit API request"""
with mock.patch("mlflow.projects.databricks.DatabricksJobRunner._jobs_runs_submit")\
as runs_submit_mock:
runs_submit_mock.return_value = {"run_id": "-1"}
yield runs_submit_mock
@pytest.fixture()
def runs_get_mock():
"""Mocks the Jobs Runs Get API request"""
with mock.patch("mlflow.projects.databricks.DatabricksJobRunner.jobs_runs_get")\
as runs_get_mock:
yield runs_get_mock
@pytest.fixture()
def cluster_spec_mock(tmpdir):
cluster_spec_handle = tmpdir.join("cluster_spec.json")
cluster_spec_handle.write(json.dumps(dict()))
yield str(cluster_spec_handle)
@pytest.fixture()
def dbfs_root_mock(tmpdir):
yield str(tmpdir.join("dbfs-root"))
@pytest.fixture()
def upload_to_dbfs_mock(dbfs_root_mock):
def upload_mock_fn(_, src_path, dbfs_uri):
mock_dbfs_dst = os.path.join(dbfs_root_mock, dbfs_uri.split("/dbfs/")[1])
os.makedirs(os.path.dirname(mock_dbfs_dst))
shutil.copy(src_path, mock_dbfs_dst)
with mock.patch.object(
mlflow.projects.databricks.DatabricksJobRunner, "_upload_to_dbfs",
new=upload_mock_fn) as upload_mock:
yield upload_mock
@pytest.fixture()
def dbfs_path_exists_mock(dbfs_root_mock): # pylint: disable=unused-argument
with mock.patch("mlflow.projects.databricks.DatabricksJobRunner._dbfs_path_exists")\
as path_exists_mock:
yield path_exists_mock
@pytest.fixture()
def dbfs_mocks(dbfs_path_exists_mock, upload_to_dbfs_mock): # pylint: disable=unused-argument
yield
@pytest.fixture()
def before_run_validations_mock(): # pylint: disable=unused-argument
with mock.patch("mlflow.projects.databricks.before_run_validations"):
yield
@pytest.fixture()
def set_tag_mock():
with mock.patch("mlflow.projects.databricks.tracking.MlflowClient") as m:
mlflow_service_mock = mock.Mock(wraps=MlflowClient())
m.return_value = mlflow_service_mock
yield mlflow_service_mock.set_tag
def _get_mock_run_state(succeeded):
if succeeded is None:
return {"life_cycle_state": "RUNNING", "state_message": ""}
if succeeded:
run_result_state = "SUCCESS"
else:
run_result_state = "FAILED"
return {"life_cycle_state": "TERMINATED", "state_message": "", "result_state": run_result_state}
def mock_runs_get_result(succeeded):
run_state = _get_mock_run_state(succeeded)
return {"state": run_state, "run_page_url": "test_url"}
def run_databricks_project(cluster_spec, **kwargs):
return mlflow.projects.run(
uri=TEST_PROJECT_DIR, backend="databricks", backend_config=cluster_spec,
parameters={"alpha": "0.4"}, **kwargs)
def test_upload_project_to_dbfs(
dbfs_root_mock, tmpdir, dbfs_path_exists_mock,
upload_to_dbfs_mock): # pylint: disable=unused-argument
# Upload project to a mock directory
dbfs_path_exists_mock.return_value = False
runner = DatabricksJobRunner(databricks_profile="DEFAULT")
dbfs_uri = runner._upload_project_to_dbfs(
project_dir=TEST_PROJECT_DIR, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID)
# Get expected tar
local_tar_path = os.path.join(dbfs_root_mock, dbfs_uri.split("/dbfs/")[1])
expected_tar_path = str(tmpdir.join("expected.tar.gz"))
file_utils.make_tarfile(
output_filename=expected_tar_path, source_dir=TEST_PROJECT_DIR,
archive_name=databricks.DB_TARFILE_ARCHIVE_NAME)
# Extract the tarred project, verify its contents
assert filecmp.cmp(local_tar_path, expected_tar_path, shallow=False)
def test_upload_existing_project_to_dbfs(dbfs_path_exists_mock): # pylint: disable=unused-argument
# Check that we don't upload the project if it already exists on DBFS
with mock.patch("mlflow.projects.databricks.DatabricksJobRunner._upload_to_dbfs")\
as upload_to_dbfs_mock:
dbfs_path_exists_mock.return_value = True
runner = DatabricksJobRunner(databricks_profile="DEFAULT")
runner._upload_project_to_dbfs(
project_dir=TEST_PROJECT_DIR, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID)
assert upload_to_dbfs_mock.call_count == 0
def test_run_databricks_validations(
tmpdir, cluster_spec_mock, # pylint: disable=unused-argument
tracking_uri_mock, dbfs_mocks, set_tag_mock): # pylint: disable=unused-argument
"""
Tests that running on Databricks fails before making any API requests if validations fail.
"""
with mock.patch.dict(os.environ, {'DATABRICKS_HOST': 'test-host', 'DATABRICKS_TOKEN': 'foo'}),\
mock.patch("mlflow.projects.databricks.DatabricksJobRunner._databricks_api_request")\
as db_api_req_mock:
# Test bad tracking URI
tracking_uri_mock.return_value = tmpdir.strpath
with pytest.raises(ExecutionException):
run_databricks_project(cluster_spec_mock, synchronous=True)
assert db_api_req_mock.call_count == 0
db_api_req_mock.reset_mock()
mlflow_service = mlflow.tracking.MlflowClient()
assert (len(mlflow_service.list_run_infos(experiment_id=FileStore.DEFAULT_EXPERIMENT_ID))
== 0)
tracking_uri_mock.return_value = "http://"
# Test misspecified parameters
with pytest.raises(ExecutionException):
mlflow.projects.run(
TEST_PROJECT_DIR, backend="databricks", entry_point="greeter",
backend_config=cluster_spec_mock)
assert db_api_req_mock.call_count == 0
db_api_req_mock.reset_mock()
# Test bad cluster spec
with pytest.raises(ExecutionException):
mlflow.projects.run(TEST_PROJECT_DIR, backend="databricks", synchronous=True,
backend_config=None)
assert db_api_req_mock.call_count == 0
db_api_req_mock.reset_mock()
# Test that validations pass with good tracking URIs
databricks.before_run_validations("http://", cluster_spec_mock)
databricks.before_run_validations("databricks", cluster_spec_mock)
def test_run_databricks(
before_run_validations_mock, # pylint: disable=unused-argument
tracking_uri_mock, runs_cancel_mock, dbfs_mocks, # pylint: disable=unused-argument
runs_submit_mock, runs_get_mock, cluster_spec_mock, set_tag_mock):
"""Test running on Databricks with mocks."""
with mock.patch.dict(os.environ, {'DATABRICKS_HOST': 'test-host', 'DATABRICKS_TOKEN': 'foo'}):
# Test that MLflow gets the correct run status when performing a Databricks run
for run_succeeded, expect_status in [(True, RunStatus.FINISHED), (False, RunStatus.FAILED)]:
runs_get_mock.return_value = mock_runs_get_result(succeeded=run_succeeded)
submitted_run = run_databricks_project(cluster_spec_mock, synchronous=False)
assert submitted_run.wait() == run_succeeded
assert submitted_run.run_id is not None
assert runs_submit_mock.call_count == 1
tags = {}
for call_args, _ in set_tag_mock.call_args_list:
tags[call_args[1]] = call_args[2]
assert tags[MLFLOW_DATABRICKS_RUN_URL] == 'test_url'
assert tags[MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID] == '-1'
assert tags[MLFLOW_DATABRICKS_WEBAPP_URL] == 'test-host'
set_tag_mock.reset_mock()
runs_submit_mock.reset_mock()
validate_exit_status(submitted_run.get_status(), expect_status)
def test_run_databricks_cluster_spec_json(
before_run_validations_mock, # pylint: disable=unused-argument
tracking_uri_mock, runs_cancel_mock, dbfs_mocks, # pylint: disable=unused-argument
runs_submit_mock, runs_get_mock,
cluster_spec_mock, set_tag_mock): # pylint: disable=unused-argument
with mock.patch.dict(os.environ, {'DATABRICKS_HOST': 'test-host', 'DATABRICKS_TOKEN': 'foo'}):
runs_get_mock.return_value = mock_runs_get_result(succeeded=True)
cluster_spec = {
"spark_version": "5.0.x-scala2.11",
"num_workers": 2,
"node_type_id": "i3.xlarge",
}
# Run project synchronously, verify that it succeeds (doesn't throw)
run_databricks_project(cluster_spec=cluster_spec, synchronous=True)
assert runs_submit_mock.call_count == 1
runs_submit_args, _ = runs_submit_mock.call_args_list[0]
req_body = runs_submit_args[0]
assert req_body["new_cluster"] == cluster_spec
def test_run_databricks_cancel(
before_run_validations_mock, tracking_uri_mock, # pylint: disable=unused-argument
runs_submit_mock, dbfs_mocks, set_tag_mock, # pylint: disable=unused-argument
runs_cancel_mock, runs_get_mock, cluster_spec_mock):
# Test that MLflow properly handles Databricks run cancellation. We mock the result of
# the runs-get API to indicate run failure so that cancel() exits instead of blocking while
# waiting for run status.
with mock.patch.dict(os.environ, {'DATABRICKS_HOST': 'test-host', 'DATABRICKS_TOKEN': 'foo'}):
runs_get_mock.return_value = mock_runs_get_result(succeeded=False)
submitted_run = run_databricks_project(cluster_spec_mock, synchronous=False)
submitted_run.cancel()
validate_exit_status(submitted_run.get_status(), RunStatus.FAILED)
assert runs_cancel_mock.call_count == 1
# Test that we raise an exception when a blocking Databricks run fails
runs_get_mock.return_value = mock_runs_get_result(succeeded=False)
with pytest.raises(mlflow.projects.ExecutionException):
run_databricks_project(cluster_spec_mock, synchronous=True)
def test_get_tracking_uri_for_run():
mlflow.set_tracking_uri("http://some-uri")
assert databricks._get_tracking_uri_for_run() == "http://some-uri"
mlflow.set_tracking_uri("databricks://profile")
assert databricks._get_tracking_uri_for_run() == "databricks"
mlflow.set_tracking_uri(None)
with mock.patch.dict(os.environ, {mlflow.tracking._TRACKING_URI_ENV_VAR: "http://some-uri"}):
assert mlflow.tracking.utils.get_tracking_uri() == "http://some-uri"
class MockProfileConfigProvider:
def __init__(self, profile):
assert profile == "my-profile"
def get_config(self):
return DatabricksConfig("host", "user", "pass", None, insecure=False)
@mock.patch('requests.request')
@mock.patch('databricks_cli.configure.provider.get_config')
@mock.patch.object(databricks_cli.configure.provider, 'ProfileConfigProvider',
MockProfileConfigProvider)
def test_databricks_http_request_integration(get_config, request):
"""Confirms that the databricks http request params can in fact be used as an HTTP request"""
def confirm_request_params(**kwargs):
headers = dict(_DEFAULT_HEADERS)
headers['Authorization'] = 'Basic dXNlcjpwYXNz'
assert kwargs == {
'method': 'PUT',
'url': 'host/clusters/list',
'headers': headers,
'verify': True,
'json': {'a': 'b'}
}
http_response = mock.MagicMock()
http_response.status_code = 200
http_response.text = '{"OK": "woo"}'
return http_response
request.side_effect = confirm_request_params
get_config.return_value = \
DatabricksConfig("host", "user", "pass", None, insecure=False)
response = DatabricksJobRunner(databricks_profile=None)._databricks_api_request(
'/clusters/list', 'PUT', json={'a': 'b'})
assert json.loads(response.text) == {'OK': 'woo'}
get_config.reset_mock()
response = DatabricksJobRunner(databricks_profile="my-profile")._databricks_api_request(
'/clusters/list', 'PUT', json={'a': 'b'})
assert json.loads(response.text) == {'OK': 'woo'}
assert get_config.call_count == 0
@mock.patch("mlflow.utils.databricks_utils.get_databricks_host_creds")
def test_run_databricks_failed(_):
with mock.patch('mlflow.utils.rest_utils.http_request') as m:
text = '{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "Node type not supported"}'
m.return_value = mock.Mock(text=text, status_code=400)
runner = DatabricksJobRunner('profile')
with pytest.raises(MlflowException):
runner._run_shell_command_job('/project', 'command', {}, {})
| 43.202532
| 100
| 0.71821
|
ac1aa560a29e638d22df350d427c5c6ce620db63
| 14,721
|
py
|
Python
|
saleor/discount/models.py
|
glosoftgroup/restaurant
|
5b10a8f5199103e5bee01b45952c9638e63f28af
|
[
"BSD-3-Clause"
] | 1
|
2018-05-03T06:17:02.000Z
|
2018-05-03T06:17:02.000Z
|
saleor/discount/models.py
|
glosoftgroup/restaurant
|
5b10a8f5199103e5bee01b45952c9638e63f28af
|
[
"BSD-3-Clause"
] | 8
|
2018-05-07T16:42:35.000Z
|
2022-02-26T03:31:56.000Z
|
saleor/discount/models.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from datetime import date
from decimal import Decimal
from django.conf import settings
from django.db import models
from django.db.models import F
from django.utils.translation import pgettext, pgettext_lazy
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django_countries import countries
from django_prices.models import PriceField
from django_prices.templatetags.prices_i18n import net
from prices import FixedDiscount, percentage_discount, Price
from utils import (
get_product_variants_and_prices, get_category_variants_and_prices)
from ..customer.models import Customer
class NotApplicable(ValueError):
pass
class VoucherQueryset(models.QuerySet):
def active(self):
today = date.today()
queryset = self.filter(
models.Q(usage_limit__isnull=True) |
models.Q(used__lt=models.F('usage_limit')))
queryset = queryset.filter(
models.Q(end_date__isnull=True) | models.Q(end_date__gte=today))
queryset = queryset.filter(start_date__lte=today)
return queryset
def increase_usage(self, voucher):
voucher.used = F('used') + 1
voucher.save(update_fields=['used'])
def decrease_usage(self, voucher):
voucher.used = F('used') - 1
voucher.save(update_fields=['used'])
@python_2_unicode_compatible
class Voucher(models.Model):
APPLY_TO_ONE_PRODUCT = 'one'
APPLY_TO_ALL_PRODUCTS = 'all'
APPLY_TO_PRODUCT_CHOICES = (
(APPLY_TO_ONE_PRODUCT,
pgettext_lazy('Voucher application', 'Apply to a single item')),
(APPLY_TO_ALL_PRODUCTS,
pgettext_lazy('Voucher application', 'Apply to all matching products')))
DISCOUNT_VALUE_FIXED = 'fixed'
DISCOUNT_VALUE_PERCENTAGE = 'percentage'
DISCOUNT_VALUE_TYPE_CHOICES = (
(DISCOUNT_VALUE_FIXED,
pgettext_lazy('Voucher discount type', settings.DEFAULT_CURRENCY)),
(DISCOUNT_VALUE_PERCENTAGE, pgettext_lazy('Voucher discount type', '%')))
PRODUCT_TYPE = 'product'
CATEGORY_TYPE = 'category'
SHIPPING_TYPE = 'shipping'
VALUE_TYPE = 'value'
TYPE_CHOICES = (
(VALUE_TYPE, pgettext_lazy('Voucher: discount for', 'All purchases')),
(PRODUCT_TYPE, pgettext_lazy('Voucher: discount for', 'One product')),
(CATEGORY_TYPE, pgettext_lazy('Voucher: discount for', 'A category of products')),
(SHIPPING_TYPE, pgettext_lazy('Voucher: discount for', 'Shipping')))
type = models.CharField(
pgettext_lazy('Voucher field', 'discount for'), max_length=20,
choices=TYPE_CHOICES, default=VALUE_TYPE)
name = models.CharField(
pgettext_lazy('Voucher field', 'name'), max_length=255, null=True,
blank=True)
code = models.CharField(
pgettext_lazy('Voucher field', 'code'), max_length=12, unique=True,
db_index=True)
usage_limit = models.PositiveIntegerField(
pgettext_lazy('Voucher field', 'usage limit'), null=True, blank=True)
used = models.PositiveIntegerField(default=0, editable=False)
start_date = models.DateField(
pgettext_lazy('Voucher field', 'start date'), default=date.today)
end_date = models.DateField(
pgettext_lazy('Voucher field', 'end date'), null=True, blank=True)
discount_value_type = models.CharField(
pgettext_lazy('Voucher field', 'discount type'), max_length=10,
choices=DISCOUNT_VALUE_TYPE_CHOICES, default=DISCOUNT_VALUE_FIXED)
discount_value = models.DecimalField(
pgettext_lazy('Voucher field', 'discount value'), max_digits=12,
decimal_places=2)
# not mandatory fields, usage depends on type
product = models.ForeignKey(
'product.Product', blank=True, null=True,
verbose_name=pgettext_lazy('Voucher field', 'product'))
category = models.ForeignKey(
'product.Category', blank=True, null=True,
verbose_name=pgettext_lazy('Voucher field', 'category'))
apply_to = models.CharField(
pgettext_lazy('Voucher field', 'apply to'),
max_length=20, blank=True, null=True)
limit = PriceField(
pgettext_lazy('Voucher field', 'limit'),
max_digits=12, decimal_places=2, null=True,
blank=True, currency=settings.DEFAULT_CURRENCY)
objects = VoucherQueryset.as_manager()
@property
def is_free(self):
return (self.discount_value == Decimal(100) and
self.discount_value_type == Voucher.DISCOUNT_VALUE_PERCENTAGE)
class Meta:
verbose_name = pgettext_lazy('Voucher model', 'voucher')
verbose_name_plural = pgettext_lazy('Voucher model', 'vouchers')
def __str__(self):
if self.name:
return self.name
discount = '%s %s' % (
self.discount_value, self.get_discount_value_type_display())
if self.type == Voucher.SHIPPING_TYPE:
if self.is_free:
return pgettext('Voucher type', 'Free shipping')
else:
return pgettext('Voucher type', '%(discount)s off shipping') % {
'discount': discount}
if self.type == Voucher.PRODUCT_TYPE:
return pgettext('Voucher type', '%(discount)s off %(product)s') % {
'discount': discount, 'product': self.product}
if self.type == Voucher.CATEGORY_TYPE:
return pgettext('Voucher type', '%(discount)s off %(category)s') % {
'discount': discount, 'category': self.category}
return pgettext('Voucher type', '%(discount)s off') % {'discount': discount}
def get_apply_to_display(self):
if self.type == Voucher.SHIPPING_TYPE and self.apply_to:
return countries.name(self.apply_to)
if self.type == Voucher.SHIPPING_TYPE:
return pgettext('Voucher', 'Any country')
if self.apply_to and self.type in {
Voucher.PRODUCT_TYPE, Voucher.CATEGORY_TYPE}:
choices = dict(self.APPLY_TO_PRODUCT_CHOICES)
return choices[self.apply_to]
def get_fixed_discount_for(self, amount):
if self.discount_value_type == self.DISCOUNT_VALUE_FIXED:
discount_price = Price(net=self.discount_value,
currency=settings.DEFAULT_CURRENCY)
discount = FixedDiscount(
amount=discount_price, name=smart_text(self))
elif self.discount_value_type == self.DISCOUNT_VALUE_PERCENTAGE:
discount = percentage_discount(
value=self.discount_value, name=smart_text(self))
fixed_discount_value = amount - discount.apply(amount)
discount = FixedDiscount(
amount=fixed_discount_value, name=smart_text(self))
else:
raise NotImplementedError('Unknown discount value type')
if discount.amount > amount:
return FixedDiscount(amount, name=smart_text(self))
else:
return discount
def validate_limit(self, value):
limit = self.limit if self.limit is not None else value
if value < limit:
msg = pgettext(
'Voucher not applicable',
'This offer is only valid for orders over %(amount)s.')
raise NotApplicable(msg % {'amount': net(limit)})
def get_discount_for_checkout(self, checkout):
if self.type == Voucher.VALUE_TYPE:
cart_total = checkout.get_subtotal()
self.validate_limit(cart_total)
return self.get_fixed_discount_for(cart_total)
elif self.type == Voucher.SHIPPING_TYPE:
if not checkout.is_shipping_required:
msg = pgettext(
'Voucher not applicable', 'Your order does not require shipping.')
raise NotApplicable(msg)
shipping_method = checkout.shipping_method
if not shipping_method:
msg = pgettext(
'Voucher not applicable', 'Please select a shipping method first.')
raise NotApplicable(msg)
if (self.apply_to and
shipping_method.country_code != self.apply_to):
msg = pgettext(
'Voucher not applicable', 'This offer is only valid in %(country)s.')
raise NotApplicable(msg % {
'country': self.get_apply_to_display()})
cart_total = checkout.get_subtotal()
self.validate_limit(cart_total)
return self.get_fixed_discount_for(shipping_method.price)
elif self.type in (Voucher.PRODUCT_TYPE, Voucher.CATEGORY_TYPE):
if self.type == Voucher.PRODUCT_TYPE:
prices = list(
(item[1] for item in get_product_variants_and_prices(
checkout.cart, self.product)))
else:
prices = list(
(item[1] for item in get_category_variants_and_prices(
checkout.cart, self.category)))
if len(prices) == 0:
msg = pgettext(
'Voucher not applicable',
'This offer is only valid for selected items.')
raise NotApplicable(msg)
if self.apply_to == Voucher.APPLY_TO_ALL_PRODUCTS:
discounts = (
self.get_fixed_discount_for(price) for price in prices)
discount_total = sum(
(discount.amount for discount in discounts),
Price(0, currency=settings.DEFAULT_CURRENCY))
return FixedDiscount(discount_total, smart_text(self))
else:
product_total = sum(
prices, Price(0, currency=settings.DEFAULT_CURRENCY))
return self.get_fixed_discount_for(product_total)
else:
raise NotImplementedError('Unknown discount type')
@python_2_unicode_compatible
class Sale(models.Model):
FIXED = 'fixed'
PERCENTAGE = 'percentage'
DISCOUNT_TYPE_CHOICES = (
(FIXED, pgettext_lazy('Discount type', settings.DEFAULT_CURRENCY)),
(PERCENTAGE, pgettext_lazy('Discount type', '%')))
name = models.CharField(pgettext_lazy('Sale (discount) field', 'name'), max_length=255)
type = models.CharField(
pgettext_lazy('Sale (discount) field', 'type'),
max_length=10, choices=DISCOUNT_TYPE_CHOICES, default=FIXED)
value = models.DecimalField(
pgettext_lazy('Sale (discount) field', 'value'),
max_digits=12, decimal_places=2, default=0)
products = models.ManyToManyField(
'product.Product', blank=True,
verbose_name=pgettext_lazy('Sale (discount) field', 'products'))
variant = models.ManyToManyField(
'product.ProductVariant', blank=True,
verbose_name=pgettext_lazy('Sale (discount) field', 'products'))
categories = models.ManyToManyField(
'product.Category', blank=True,
verbose_name=pgettext_lazy('Sale (discount) field', 'categories'))
customers = models.ManyToManyField(
Customer, blank=True,related_name='customer_discount',
verbose_name=pgettext_lazy('Sale (discount) field', 'customers'))
start_date = models.DateField(
pgettext_lazy('Sale field', 'start date'), default=date.today)
end_date = models.DateField(
pgettext_lazy('Sale field', 'end date'), null=True, blank=True)
class Meta:
app_label = 'discount'
verbose_name = pgettext_lazy('Sale (discount) model', 'sale')
verbose_name_plural = pgettext_lazy('Sales (discounts) model', 'sales')
def __repr__(self):
return 'Sale(name=%r, value=%r, type=%s)' % (
str(self.name), self.value, self.get_type_display())
def __str__(self):
return self.name
def get_discount(self):
if self.type == self.FIXED:
discount_price = Price(net=self.value,
currency=settings.DEFAULT_CURRENCY)
return FixedDiscount(amount=discount_price, name=self.name)
elif self.type == self.PERCENTAGE:
return percentage_discount(value=self.value, name=self.name)
raise NotImplementedError('Unknown discount type')
def _product_has_category_discount(self, product, discounted_categories):
for category in product.categories.all():
for discounted_category in discounted_categories:
if category.is_descendant_of(discounted_category,
include_self=True):
return True
return False
def modifier_for_product(self, product):
discounted_products = {p.pk for p in self.products.all()}
discounted_categories = set(self.categories.all())
if product.pk in discounted_products:
return self.get_discount()
if self._product_has_category_discount(
product, discounted_categories):
return self.get_discount()
raise NotApplicable(
pgettext(
'Voucher not applicable',
'Discount not applicable for this product'))
def modifier_for_variant(self, variant):
discounted_variants = {p.pk for p in self.variant.all()}
discounted_categories = set(self.categories.all())
if variant.pk in discounted_variants:
return self.get_discount()
raise NotApplicable(
pgettext(
'Voucher not applicable',
'Discount not applicable for this product'))
def get_variant_discounts(variant, discounts, **kwargs):
for discount in discounts:
try:
yield discount.modifier_for_variant(variant, **kwargs)
except NotApplicable:
pass
def get_product_discounts(product, discounts, **kwargs):
for discount in discounts:
try:
yield discount.modifier_for_product(product, **kwargs)
except NotApplicable:
pass
def calculate_discounted_price(product, price, discounts, **kwargs):
if discounts:
discounts = list(
get_product_discounts(product, discounts, **kwargs))
if discounts:
price = min(price | discount for discount in discounts)
return price
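# Worked example (illustrative sketch only, not part of the original module): a 10%
# percentage Sale flowing through calculate_discounted_price().
#   sale = Sale(name="Summer", type=Sale.PERCENTAGE, value=10)
#   # assuming `product` is among sale.products and price = Price(net=100, currency='USD'):
#   #   sale.modifier_for_product(product)  -> percentage_discount(10, name='Summer')
#   #   calculate_discounted_price(product, price, [sale]) -> Price with net=90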
| 42.06
| 92
| 0.623463
|
159023bcf61632c8fdc017063fcddb961a956727
| 6,586
|
py
|
Python
|
deep_lincs/models/multi_classifier.py
|
manzt/lincs-ae
|
51b2fbaa0a87580e545b5a962a418ca8abc6a3bf
|
[
"MIT"
] | 7
|
2019-10-31T14:03:20.000Z
|
2021-07-31T01:28:47.000Z
|
deep_lincs/models/multi_classifier.py
|
manzt/lincs-ae
|
51b2fbaa0a87580e545b5a962a418ca8abc6a3bf
|
[
"MIT"
] | null | null | null |
deep_lincs/models/multi_classifier.py
|
manzt/lincs-ae
|
51b2fbaa0a87580e545b5a962a418ca8abc6a3bf
|
[
"MIT"
] | 1
|
2020-07-03T00:16:07.000Z
|
2020-07-03T00:16:07.000Z
|
import pandas as pd
import numpy as np
import altair as alt
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Dense, Dropout
from sklearn.metrics import confusion_matrix
from .base_network import BaseNetwork
class MultiClassifier(BaseNetwork):
"""Represents a classifier for multiple metadata fields
Parameters
----------
dataset : ``Dataset``
An instance of a ``Dataset`` intended to train and evaluate a model.
targets : ``list(str)``
        A list of metadata fields, each defining a separate classification task.
test_sizes : tuple, (optional, default ( ``0.2`` , ``0.2`` ))
        Sizes of the test splits used to divide the dataset into training, validation, and testing sets.
Attributes
----------
targets : ``list(str)``
Targets for model.
train : ``Dataset``
Dataset used to train the model.
val : ``Dataset``
Dataset used during training as validation.
test : ``Dataset``
Dataset used to evaluate the model.
model : ``tensorflow.keras.Model``
Compiled and trained model.
in_size : ``int``
Size of inputs (generally 978 for L1000 landmark genes).
out_size : ``int``
Sum total of classification categories.
"""
def __init__(self, dataset, targets, **kwargs):
for target in targets:
dataset._data[target] = pd.Categorical(dataset._data[target])
super(MultiClassifier, self).__init__(dataset=dataset, target=targets, **kwargs)
self.in_size, self.out_size = self._get_in_out_size(dataset, targets)
def compile_model(
self,
hidden_layers,
dropout_rate=0.0,
activation="relu",
optimizer="adam",
final_activation="softmax",
):
"""Defines how model is built and compiled
Parameters
----------
hidden_layers : ``list(int)``
A list describing the size of the hidden layers.
dropout_rate : ``float`` (optional: default ``0.0``)
Dropout rate used during training. Applied to all hidden layers.
activation : ``str``, (optional: default ``"relu"``)
Activation function used in hidden layers.
optimizer : ``str``, (optional: default ``"adam"``)
Optimizer used during training.
final_activation : ``str`` (optional: default ``"softmax"``)
Activation function used in final layer.
loss : ``str`` (optional: default ``"categorical_crossentropy"``)
Loss function.
Returns
-------
``None``
"""
inputs = Input(shape=(self.in_size,))
x = Dropout(dropout_rate)(inputs)
for nunits in hidden_layers:
x = Dense(nunits, activation=activation)(x)
x = Dropout(dropout_rate)(x)
outputs = [
Dense(size, activation=final_activation, name=name)(x)
for name, size in self.target_info.items()
]
model = Model(inputs, outputs)
model.compile(
optimizer=optimizer, loss=loss, metrics=["accuracy"]
)
self.model = model
def _get_in_out_size(self, dataset, targets):
self.target_info = {}
for target in targets:
unique_targets = dataset.sample_meta[target].unique().tolist()
if np.nan in unique_targets:
raise Exception(
f"Dataset contains np.nan entry in '{target}'. "
f"You can drop these samples to train the "
f"classifier with Dataset.drop_na('{target}')."
)
self.target_info[target] = len(unique_targets)
in_size = dataset.data.shape[1]
out_size = sum(self.target_info.values())
return in_size, out_size
def plot_confusion_matrix(
self, normalize=True, zero_diag=False, size=300, color_scheme="lightgreyteal"
):
"""Evaluates model and plots a confusion matrix of classification results
Parameters
----------
normalize : ``bool``, (optional: default ``True``)
Whether to normalize counts to frequencies.
zero_diag : ``bool`` (optional: default ``False``)
            Whether to zero the diagonal of the matrix. Useful for examining which
            categories are most frequently misidentified.
size : ``int``, (optional: default ``300``)
Size of the plot in pixels.
color_scheme : ``str``, (optional: default ``"lightgreyteal"``)
Color scheme in heatmap. Can be any from https://vega.github.io/vega/docs/schemes/.
Returns
-------
``altair.Chart`` object
"""
y_dummies = [pd.get_dummies(self.test.sample_meta[t]) for t in self.target]
y_pred = self.predict()
heatmaps = [
self._create_heatmap(d, p, normalize, zero_diag, size, color_scheme, title)
for d, p, title in zip(y_dummies, y_pred, self.target)
]
return alt.hconcat(*heatmaps)
def _create_heatmap(
self, y_dummies, y_pred, normalize, zero_diag, size, color_scheme, title
):
classes = y_dummies.columns.tolist()
y_test = y_dummies.values
cm = confusion_matrix(y_test.argmax(1), y_pred.argmax(1))
if zero_diag:
np.fill_diagonal(cm, 0)
if normalize:
cm = cm / cm.sum(axis=1)[:, np.newaxis]
df = (
pd.DataFrame(cm.round(2), columns=classes, index=classes)
.reset_index()
.melt(id_vars="index")
.round(2)
)
base = alt.Chart(df).encode(
x=alt.X("index:N", title="Predicted Label"),
y=alt.Y("variable:N", title="True Label"),
tooltip=["value"],
)
heatmap = base.mark_rect().encode(
color=alt.Color("value:Q", scale=alt.Scale(scheme=color_scheme))
)
text = base.mark_text(size=0.5 * (size / len(classes))).encode(
text=alt.Text("value")
)
return (heatmap + text).properties(width=size, height=size, title=title)
def __repr__(self):
return (
f"<MultiClassifier: "
f"(targets: {self.target}, "
f"input_size: {self.in_size}, "
f"output_size: {self.out_size})>"
)
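# Minimal usage sketch (field names are hypothetical, not from the original package):
#   clf = MultiClassifier(dataset, targets=["cell_id", "pert_type"])
#   clf.compile_model(hidden_layers=[256, 64], dropout_rate=0.1)
#   # training/evaluation is handled by the BaseNetwork parent (not shown here);
#   # once fitted, clf.plot_confusion_matrix() draws one heatmap per target.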
| 33.602041
| 99
| 0.567568
|
f624cab27819696d157df96323e729fc67ccd128
| 1,874
|
py
|
Python
|
framework/agent/network/network.py
|
Francesco-Sovrano/Combining--experience-replay--with--exploration-by-random-network-distillation-
|
bf6f2b9a8703227dc40b06bbb170cbb89c04e76f
|
[
"MIT"
] | 4
|
2019-06-03T08:55:54.000Z
|
2021-02-01T14:42:44.000Z
|
framework/agent/network/network.py
|
Francesco-Sovrano/Combining--experience-replay--with--exploration-by-random-network-distillation-
|
bf6f2b9a8703227dc40b06bbb170cbb89c04e76f
|
[
"MIT"
] | null | null | null |
framework/agent/network/network.py
|
Francesco-Sovrano/Combining--experience-replay--with--exploration-by-random-network-distillation-
|
bf6f2b9a8703227dc40b06bbb170cbb89c04e76f
|
[
"MIT"
] | 2
|
2019-06-03T08:59:55.000Z
|
2020-11-16T22:46:58.000Z
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.distributions import Normal  # assumed source of Normal, which is used below but never imported in the original
class Network():
def __init__(self, id, training):
self.training = training
self.id = id
self.use_internal_state = False
# Initialize keys collections
self.shared_keys = []
self.update_keys = []
def _update_keys(self, scope_name, share_trainables):
if share_trainables:
self.shared_keys += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name)
self.update_keys += tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope_name)
def _batch_normalization_layer(self, input, scope, name="", share_trainables=True, renorm=False, center=True, scale=True):
layer_type = 'BatchNorm'
with tf.variable_scope("{}/{}{}".format(scope,layer_type,name), reuse=tf.AUTO_REUSE) as variable_scope:
print( " [{}]Building or reusing scope: {}".format(self.id, variable_scope.name) )
batch_norm = tf.layers.BatchNormalization(renorm=renorm, center=center, scale=scale) # renorm when minibaches are too small
norm_input = batch_norm.apply(input, training=self.training)
# update keys
self._update_keys(variable_scope.name, share_trainables)
# return result
return batch_norm, norm_input
def _feature_entropy_layer(self, input, scope, name="", share_trainables=True): # feature entropy measures how much the input is uncommon
layer_type = 'Fentropy'
batch_norm, _ = self._batch_normalization_layer(input=input, scope=scope, name=layer_type)
with tf.variable_scope("{}/{}{}".format(scope,layer_type,name), reuse=tf.AUTO_REUSE) as variable_scope:
fentropy = Normal(batch_norm.moving_mean, tf.sqrt(batch_norm.moving_variance)).cross_entropy(input)
fentropy = tf.layers.flatten(fentropy)
if len(fentropy.get_shape()) > 1:
fentropy = tf.reduce_mean(fentropy, axis=-1)
# update keys
self._update_keys(variable_scope.name, share_trainables)
return fentropy
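# Illustrative subclass sketch (not part of the original framework): a concrete network
# would build its layers through these helpers so that trainable/update keys are
# collected per scope, e.g.
#   class MyNetwork(Network):
#       def build(self, input, scope):
#           _, normed = self._batch_normalization_layer(input, scope, name="1")
#           return normed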
| 46.85
| 138
| 0.752401
|
10680fff3a69bcf6d9ef8a5489adb316b570f2f4
| 9,360
|
py
|
Python
|
ald/core/callback.py
|
zpeng2/ald
|
040ce6176998a9ca024eb9f420e8c6c63ca6af81
|
[
"MIT"
] | null | null | null |
ald/core/callback.py
|
zpeng2/ald
|
040ce6176998a9ca024eb9f420e8c6c63ca6af81
|
[
"MIT"
] | 1
|
2020-11-29T06:46:22.000Z
|
2020-11-29T06:46:22.000Z
|
ald/core/callback.py
|
zpeng2/ald
|
040ce6176998a9ca024eb9f420e8c6c63ca6af81
|
[
"MIT"
] | null | null | null |
import pycuda.gpuarray as gpuarray
from abc import abstractmethod, ABC
import numpy as np
import time
import datetime
import h5py
import os
class CallbackRunner(ABC):
"""Decide whether to run a callback."""
@abstractmethod
def iscomputing(self, i):
pass
class Always(CallbackRunner):
def iscomputing(self, i):
return True
class RangedRunner(CallbackRunner):
"""Run callback if an index is in a range."""
def __init__(self, start, stop, freq):
# make a range that includes the stop as a point
        # number of freq-spaced ticks that fit between start and stop
k = (stop - start + 1) // freq
start = stop - (k - 1) * freq
# python range is not right-end inclusive
stop = stop + 1
self._range = range(start, stop, freq)
@classmethod
def from_forward_count(cls, start=0, freq=1, count=10):
"""Constructor that takes the start, freq and number of points."""
stop = start + (count - 1) * freq
return cls(start, stop, freq)
@classmethod
def from_backward_count(cls, stop=100, freq=1, count=10):
"""Constructor that takes the stop, freq and number of points."""
start = stop - (count - 1) * freq
# require start to be positive, otherwise pointless
if start < 0:
raise ValueError("start={} is negative".format(start))
return cls(start, stop, freq)
def iscomputing(self, i):
if i in self._range:
return True
else:
return False
def __len__(self):
return len(self._range)
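# Example (added for clarity): RangedRunner(start=0, stop=100, freq=10) re-anchors the
# range at the stop index, so iscomputing(i) is True exactly for i in 10, 20, ..., 100
# (k = (100 - 0 + 1) // 10 = 10 ticks, ending at stop).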
class Callback(ABC):
"""Callback that computes/stores information as the simulation evolves."""
def __init__(self, runner):
if not isinstance(runner, CallbackRunner):
raise TypeError()
self.runner = runner
@abstractmethod
def __call__(self, *args, **kwargs):
pass
class DisplacementMeanVariance(Callback):
"""Compute mean and variance of a GPUArray."""
def __init__(self, runner, variable, unwrap=False):
super().__init__(runner)
        # which cfg attribute to compute statistics on.
if not variable in ["x", "y"]:
raise ValueError("invalid variable: {}".format(variable))
self.variable = variable
# unwrap periodic to get absolute positions.
self.unwrap = unwrap
# instantiate arrays to store mean and variance
self.m = np.zeros(len(self.runner))
self.v = np.zeros_like(self.m)
self.minx = np.zeros_like(self.m)
# keep track of time
self.t = np.zeros_like(self.m)
# index used to store values
self.idx = 0
def mean_variance(self, cfg):
# get data.
x = getattr(cfg, self.variable).copy()
# initial location
x0 = getattr(cfg, self.variable + "0").copy()
# unwrap
if self.unwrap:
# get the boundary crossing array.
passx = getattr(cfg, "pass" + self.variable).copy()
# do unwrap
L = getattr(cfg.domain, "L" + self.variable)
# need the relative to the initial positions
x -= x0
x += passx * L
else:
x -= x0
N = len(x)
mean_x = gpuarray.sum(x) / N
min_x = gpuarray.min(x)
# copy to cpu and flatten
mean_x = float(mean_x.get())
min_x = float(min_x.get())
# compute variance
variance_x = gpuarray.sum((x - mean_x) ** 2) / N
variance_x = float(variance_x.get())
return mean_x, variance_x, min_x
def __call__(self, i, cfg):
if self.runner.iscomputing(i):
m, v, minx = self.mean_variance(cfg)
self.m[self.idx] = m
self.minx[self.idx] = minx
self.v[self.idx] = v
self.t[self.idx] = cfg.t
self.idx += 1
return None
def save2h5(self, file, group):
"""Save m and t to file"""
# save
with h5py.File(file, "r+") as f:
f[os.path.join(group, "t")] = self.t
f[os.path.join(group, "m")] = self.m
f[os.path.join(group, "minx")] = self.minx
f[os.path.join(group, "v")] = self.v
class SimpleMean(Callback):
"""Compute simple mean"""
def __init__(self, runner, variable, keep_time=False):
super().__init__(runner)
self.variable = variable
# instantiate arrays to store mean and variance
self.m = np.zeros(len(self.runner))
# keep track of time
self.keep_time = keep_time
if keep_time:
self.t = np.zeros_like(self.m)
# index used to store values
self.idx = 0
def compute_mean(self, cfg):
# get data.
x = getattr(cfg, self.variable).copy()
N = len(x)
mean_x = gpuarray.sum(x) / N
# copy to cpu and flatten
mean_x = float(mean_x.get())
return mean_x
def __call__(self, i, cfg):
if self.runner.iscomputing(i):
self.m[self.idx] = self.compute_mean(cfg)
if self.keep_time:
self.t[self.idx] = cfg.t
self.idx += 1
return None
def save2h5(self, file, group):
"""Save m and t to file"""
# save
with h5py.File(file, "r+") as f:
f[os.path.join(group, "t")] = self.t
f[os.path.join(group, "m")] = self.m
class PrintCallback(Callback):
def __init__(self, runner):
super().__init__(runner)
def __call__(self, i, cfg):
if self.runner.iscomputing(i):
print("t = {:.3f}".format(cfg.t))
class ETA(Callback):
def __init__(self, runner):
super().__init__(runner)
def __call__(self, i, cfg):
"""Printout TPS and ETA."""
if i == 0:
self.start = time.time()
else:
if self.runner.iscomputing(i):
elapsed = time.time() - self.start
# timesteps per second
tps = int(i / elapsed)
# estimated remaining time
# total estimated time
total = cfg.Nt / tps
eta = total - elapsed
# convert seconds to human friendly format hh:mm:ss
eta_human = str(datetime.timedelta(seconds=int(eta)))
# print output
print("TPS:{0}, ETA:{1}".format(tps, eta_human))
class ConfigSaver(Callback):
def __init__(
self, runner, file, variables=["x", "y", "theta"], unwrap=[False, False, False]
):
super().__init__(runner)
self.variables = variables
if not isinstance(file, str):
raise TypeError("invalid filename")
self.file = file
        # lengths of variables and unwrap should be the same
if len(variables) != len(unwrap):
raise ValueError("lengths of variables and unwrap do not match")
        # theta cannot be unwrapped
if "theta" in variables:
i = variables.index("theta")
if unwrap[i]:
raise ValueError("cannot unwrap theta")
self.unwrap = unwrap
# keep a frame counter
self.counter = 0
# if file exists, error
if os.path.exists(self.file):
raise ValueError("file: {} exists.".format(self.file))
else:
# create file
with open(self.file, "w") as f:
pass
def get_config(self, variable, cfg, unwrap):
# need to unwrap if requested
# need to copy data! because it will be modified later.
variable_gpu = getattr(cfg, variable).copy()
if unwrap:
variable_gpu += getattr(cfg, "pass" + variable).copy()
# need to copy to cpu
return variable_gpu.get()
def __call__(self, i, cfg):
if self.runner.iscomputing(i):
with h5py.File(self.file, "r+") as f:
path = "config/{}/".format(self.counter)
# keep track of time
f[path + "t"] = cfg.t
# save configuration
for variable, unwrap in zip(self.variables, self.unwrap):
configpath = path + "{}".format(variable)
f[configpath] = self.get_config(variable, cfg, unwrap)
# need to update counter
self.counter += 1
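# Resulting HDF5 layout written by ConfigSaver (illustrative):
#
#     config/0/t, config/0/x, config/0/y, config/0/theta
#     config/1/t, config/1/x, ...
#
# one group per saved frame, with positions optionally unwrapped by adding the
# corresponding pass<variable> boundary-crossing array before writing.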
# class RuntimeSaver(Callback):
# """Save the sampled runtime history of aprticles."""
# def __init__(self, file):
# runner = Always()
# super().__init__(runner)
# if not isinstance(file, str):
# raise TypeError("invalid filename")
# self.file = file
# # array of runtime history regardless of particle id.
# self.runtimes = []
# def __call__(self, i, cfg):
# if self.runner.iscomputing(i):
# tauR = cfg.tauR.get()
# tau = cfg.tau.get()
# if tau is zero, the particle has just tumbled and its runtime was newly
# generated, so it needs to be recorded.
# for i in range(cfg.N):
# if tau[i] == 0.0:
# self.runtimes.append(tauR[i])
# def save2h5(self):
# with h5py.File(self.file, "r+") as f:
# path = "runtimes"
# # keep track of time
# f[path] = np.array(self.runtimes)
| 32.164948
| 87
| 0.552244
|
cfdc3f92a20e73650154786cd5735a474d13819b
| 1,514
|
py
|
Python
|
test/test_blockchain_data_transaction_not_found.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_blockchain_data_transaction_not_found.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_blockchain_data_transaction_not_found.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1
|
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.blockchain_data_transaction_not_found_error import BlockchainDataTransactionNotFoundError
globals()['BlockchainDataTransactionNotFoundError'] = BlockchainDataTransactionNotFoundError
from cryptoapis.model.blockchain_data_transaction_not_found import BlockchainDataTransactionNotFound
class TestBlockchainDataTransactionNotFound(unittest.TestCase):
"""BlockchainDataTransactionNotFound unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBlockchainDataTransactionNotFound(self):
"""Test BlockchainDataTransactionNotFound"""
# FIXME: construct object with mandatory attributes with example values
# model = BlockchainDataTransactionNotFound() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 38.820513
| 484
| 0.785997
|
2b8fb34fb1210990eef456727ca22355623f7e64
| 2,671
|
py
|
Python
|
rivalcfg/udev.py
|
nixtux/rivalcfg
|
abe0dd650f6a592c539751cda9652319aea2c342
|
[
"WTFPL"
] | null | null | null |
rivalcfg/udev.py
|
nixtux/rivalcfg
|
abe0dd650f6a592c539751cda9652319aea2c342
|
[
"WTFPL"
] | null | null | null |
rivalcfg/udev.py
|
nixtux/rivalcfg
|
abe0dd650f6a592c539751cda9652319aea2c342
|
[
"WTFPL"
] | null | null | null |
"""
This module handles udev-related tasks on Linux. It contains functions to
generate, check and update rules files.
.. NOTE::
    The functions of this module must only be used on udev-based Linux distros.
"""
import re
import subprocess
from .version import VERSION
from .devices import PROFILES
#: Path to the udev rules file
RULES_FILE_PATH = "/etc/udev/rules.d/99-steelseries-rival.rules"
def generate_rules():
"""Generates the content of the udev rules file.
:rtype: str
"""
rules = "# Generated by rivalcfg v%s\n" % VERSION
rules += "# Do not edit this file. It can be regenerated with the following command:\n" # noqa
rules += "# \n"
rules += "# rivalcfg --update-udev\n\n"
for profile in PROFILES.values():
rules += "# %s\n" % profile["name"]
rules += "SUBSYSTEM==\"hidraw\", ATTRS{idVendor}==\"%04x\", ATTRS{idProduct}==\"%04x\", MODE=\"0666\"\n" % ( # noqa
profile["vendor_id"], profile["product_id"])
rules += "SUBSYSTEM==\"usb\", ATTRS{idVendor}==\"%04x\", ATTRS{idProduct}==\"%04x\", MODE=\"0666\"\n\n" % ( # noqa
profile["vendor_id"], profile["product_id"])
return rules
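# For illustration (not part of the original module): with a profile entry such
# as {"name": "Example Rival", "vendor_id": 0x1038, "product_id": 0x1702}
# (placeholder values), generate_rules() emits a block of the form
#
#     # Example Rival
#     SUBSYSTEM=="hidraw", ATTRS{idVendor}=="1038", ATTRS{idProduct}=="1702", MODE="0666"
#     SUBSYSTEM=="usb", ATTRS{idVendor}=="1038", ATTRS{idProduct}=="1702", MODE="0666"
#
# i.e. world-readable/writable device nodes so rivalcfg can talk to the mouse
# without root privileges.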
def write_rules_file(path=RULES_FILE_PATH):
"""Generates and write the udev rules file at the given place.
:param str path: The path of the output file.
    :raise PermissionError: The user does not have sufficient permissions to write
the file.
"""
path = str(path) # py27 compatibility: coerce PosixPath to string
rules = generate_rules()
with open(path, "w") as rules_file:
rules_file.write(rules)
def trigger():
"""Trigger udev to take into account the new rules."""
subprocess.check_output(["udevadm", "trigger"])
def are_rules_up_to_date(rules, current_version=VERSION):
"""Check if the given udev rules are up to date.
    :param str rules: The content of a udev rules file to check.
:param str current_version: The current rivalcfg version.
:rtype: bool
"""
version_regexp = re.compile(r".*rivalcfg\s+v([0-9]+\.[0-9]+\.[0-9]+(.+)?)\s*.*") # noqa
rules_version = None
if version_regexp.match(rules):
rules_version = version_regexp.match(rules).group(1)
return rules_version == current_version
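# Example (illustrative): a rules file whose header line reads
# "# Generated by rivalcfg v1.0.0" is only considered current when the
# installed rivalcfg reports the same version string:
#
#     are_rules_up_to_date("# Generated by rivalcfg v1.0.0\n...", "1.0.0")  # True
#     are_rules_up_to_date("# Generated by rivalcfg v1.0.0\n...", "1.2.0")  # False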
def is_rules_file_up_to_date(path=RULES_FILE_PATH):
"""Check if the given udev rules file is up to date.
:param str path: The path of the udev rules file.
:rtype: bool
"""
path = str(path) # py27 compatibility: coerce PosixPath to string
with open(path, "r") as rules_file:
return are_rules_up_to_date(rules_file.read())
| 31.423529
| 124
| 0.650693
|
fb4c7dc494f647778b9e74a8dffee751bc3f91e1
| 10,092
|
py
|
Python
|
docs/sphinx/conf.py
|
sdss/fibermeas
|
4d29ff58a14b025cf6320ab1caef5f4bcbba394b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/sphinx/conf.py
|
sdss/fibermeas
|
4d29ff58a14b025cf6320ab1caef5f4bcbba394b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/sphinx/conf.py
|
sdss/fibermeas
|
4d29ff58a14b025cf6320ab1caef5f4bcbba394b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
from pkg_resources import parse_version
try:
from fibermeas import __version__
except ModuleNotFoundError:
from sdsstools import get_package_version
__version__ = get_package_version(__file__, 'sdss-fibermeas') or 'dev'
# Are we building in RTD?
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# Sphinx template selected in cookiecutter and whether to use releases
sphinx_template = 'alabaster'
use_releases = 'no'
if sphinx_template == 'sphinx-bootstrap':
import sphinx_bootstrap_theme
# Importing matplotlib here with agg to prevent tkinter error in readthedocs
# import matplotlib
# matplotlib.use('agg')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# source_parsers = {
# '.md': 'recommonmark.parser.CommonMarkParser',
# }
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'fibermeas'
copyright = '{0}, {1}'.format('2020', 'Conor Sayres')
author = 'Conor Sayres'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = parse_version(__version__).base_version
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Intersphinx mappings
intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None),
'astropy': ('http://docs.astropy.org/en/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
autodoc_mock_imports = ['_tkinter']
autodoc_member_order = 'groupwise'
napoleon_use_rtype = False
napoleon_use_ivar = True
rst_epilog = f"""
.. |numpy_array| replace:: Numpy array
.. |HDUList| replace:: :class:`~astropy.io.fits.HDUList`
.. |fibermeas_version| replace:: {__version__}
"""
# -- Options for HTML output ----------------------------------------------
html_css_files = [
'pygments.css'
]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_template == 'sphinx-bootstrap':
html_theme = 'bootstrap'
html_sidebars = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "SDSS: {0}".format(project),
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "paper",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_logo = '_static/sdssv_logo_small.png'
html_css_files += ["custom_bootstrap.css"]
html_sidebars = {'**': ['localtoc.html']}
elif sphinx_template == 'alabaster':
html_theme = 'alabaster'
html_theme_options = {
'logo': 'sdssv_logo.png',
'github_user': 'sdss',
'github_repo': project,
'github_button': True,
'github_type': 'star',
'sidebar_collapse': True,
'page_width': '80%'
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
html_css_files += ["custom.css"]
html_favicon = './_static/favicon_sdssv.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# See https://github.com/rtfd/readthedocs.org/issues/1776 for why we do this
if on_rtd:
html_static_path = []
else:
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}pdoc'.format('fibermeas')
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fibermeas', u'{0} Documentation'.format(project),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'{0} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
if use_releases == 'yes':
extensions += ['sdsstools.releases']
releases_github_path = 'sdss/fibermeas'
releases_document_name = ['CHANGELOG']
releases_unstable_prehistory = True
| 30.862385
| 84
| 0.651902
|
ae6148bd6e488c9d2aa4e2b20faad988d54fd8d0
| 14,514
|
py
|
Python
|
python/cudf/dataframe/index.py
|
cmgreen210/cudf
|
89462008c24a647de457ce595ab44c0c0d758450
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/dataframe/index.py
|
cmgreen210/cudf
|
89462008c24a647de457ce595ab44c0c0d758450
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/dataframe/index.py
|
cmgreen210/cudf
|
89462008c24a647de457ce595ab44c0c0d758450
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018, NVIDIA CORPORATION.
from __future__ import print_function, division
import pandas as pd
import numpy as np
import pickle
from copy import deepcopy, copy
from librmm_cffi import librmm as rmm
from . import columnops
from cudf.utils import cudautils, utils
from .buffer import Buffer
from .numerical import NumericalColumn
from .column import Column
from .datetime import DatetimeColumn
from .categorical import CategoricalColumn
from cudf.comm.serialize import register_distributed_serializer
class Index(object):
"""The root interface for all Series indexes.
"""
is_monotonic = None
is_monotonic_increasing = None
is_monotonic_decreasing = None
def serialize(self, serialize):
"""Serialize into pickle format suitable for file storage or network
transmission.
Parameters
---
serialize: A function provided by register_distributed_serializer
middleware.
"""
header = {}
header['payload'], frames = serialize(pickle.dumps(self))
header['frame_count'] = len(frames)
return header, frames
@classmethod
def deserialize(cls, deserialize, header, frames):
"""Convert from pickle format into Index
Parameters
---
deserialize: A function provided by register_distributed_serializer
middleware.
header: The data header produced by the serialize function.
frames: The serialized data
"""
payload = deserialize(header['payload'],
frames[:header['frame_count']])
return pickle.loads(payload)
def take(self, indices):
"""Gather only the specific subset of indices
Parameters
---
indices: An array-like that maps to values contained in this Index.
"""
assert indices.dtype.kind in 'iu'
if indices.size == 0:
# Empty indices
return RangeIndex(indices.size)
else:
# Gather
index = cudautils.gather(data=self.gpu_values, index=indices)
col = self.as_column().replace(data=Buffer(index))
return as_index(col)
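    # Illustrative sketch of take() (added; a hedged example, not from the
    # original file):
    #
    #     idx = as_index(np.array([10, 20, 30, 40]))
    #     idx.take(np.array([3, 0], dtype=np.int64))   # -> index over [40, 10]
    #
    # An empty `indices` array short-circuits to an empty RangeIndex instead of
    # launching a gather kernel.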
def argsort(self, ascending=True):
return self.as_column().argsort(ascending=ascending)
@property
def values(self):
return np.asarray([i for i in self.as_column()])
def to_pandas(self):
return pd.Index(self.as_column().to_pandas(), name=self.name)
def to_arrow(self):
return self.as_column().to_arrow()
@property
def gpu_values(self):
return self.as_column().to_gpu_array()
def min(self):
return self.as_column().min()
def max(self):
return self.as_column().max()
def sum(self):
return self.as_column().sum()
def find_segments(self):
"""Return the beginning index for segments
Returns
-------
result : NumericalColumn
"""
segments, _ = self._find_segments()
return segments
def _find_segments(self):
seg, markers = cudautils.find_segments(self.gpu_values)
return NumericalColumn(data=Buffer(seg), dtype=seg.dtype), markers
@classmethod
def _concat(cls, objs):
data = Column._concat([o.as_column() for o in objs])
return as_index(data)
def __eq__(self, other):
if not isinstance(other, Index):
return NotImplemented
elif len(self) != len(other):
return False
lhs = self.as_column()
rhs = other.as_column()
res = lhs.unordered_compare('eq', rhs).all()
return res
def join(self, other, method, how='left', return_indexers=False):
column_join_res = self.as_column().join(
other.as_column(), how=how, return_indexers=return_indexers,
method=method)
if return_indexers:
joined_col, indexers = column_join_res
joined_index = as_index(joined_col)
return joined_index, indexers
else:
return column_join_res
class RangeIndex(Index):
"""An iterable integer index defined by a starting value and ending value.
Can be sliced and indexed arbitrarily without allocating memory for the
complete structure.
Properties
---
_start: The first value
_stop: The last value
name: Name of the index
"""
def __init__(self, start, stop=None, name=None):
"""RangeIndex(size), RangeIndex(start, stop)
Parameters
----------
start, stop: int
name: string
"""
if isinstance(start, range):
therange = start
start = therange.start
stop = therange.stop
if stop is None:
start, stop = 0, start
self._start = int(start)
self._stop = int(stop)
self.name = name
def copy(self, deep=True):
        if deep:
return deepcopy(self)
else:
return copy(self)
def __repr__(self):
return "{}(start={}, stop={})".format(self.__class__.__name__,
self._start, self._stop)
def __len__(self):
return max(0, self._stop - self._start)
def __getitem__(self, index):
if isinstance(index, slice):
start, stop = utils.normalize_slice(index, len(self))
start += self._start
stop += self._start
if index.step is None:
return RangeIndex(start, stop)
else:
return index_from_range(start, stop, index.step)
elif isinstance(index, int):
index = utils.normalize_index(index, len(self))
index += self._start
return index
else:
raise ValueError(index)
def __eq__(self, other):
if isinstance(other, RangeIndex):
return (self._start == other._start and self._stop == other._stop)
else:
return super(RangeIndex, self).__eq__(other)
@property
def dtype(self):
return np.dtype(np.int64)
@property
def _values(self):
return Column(range(self._start, self._stop))
@property
def is_contiguous(self):
return True
@property
def size(self):
return max(0, self._stop - self._start)
def find_label_range(self, first, last):
# clip first to range
if first is None or first < self._start:
begin = self._start
elif first < self._stop:
begin = first
else:
begin = self._stop
# clip last to range
if last is None:
end = self._stop
elif last < self._start:
end = begin
elif last < self._stop:
end = last + 1
else:
end = self._stop
# shift to index
return begin - self._start, end - self._start
def as_column(self):
if len(self) > 0:
vals = cudautils.arange(self._start, self._stop, dtype=self.dtype)
else:
vals = rmm.device_array(0, dtype=self.dtype)
return NumericalColumn(data=Buffer(vals), dtype=vals.dtype)
def to_gpu_array(self):
return self.as_column().to_gpu_array()
def to_pandas(self):
return pd.RangeIndex(start=self._start, stop=self._stop,
dtype=self.dtype)
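# Illustrative semantics of RangeIndex (added for clarity; not in the original
# source):
#
#     ri = RangeIndex(3, 10)       # labels 3, 4, ..., 9 (no device memory used)
#     len(ri)                      # 7
#     ri[2]                        # 5   (integer positions are shifted by _start)
#     ri[1:4]                      # RangeIndex(start=4, stop=7)
#     ri.find_label_range(5, 8)    # (2, 6) -> positions of labels 5..8 inclusive
#
# find_label_range clips out-of-range labels to the ends rather than raising,
# which is what label-based slicing relies on.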
def index_from_range(start, stop=None, step=None):
vals = cudautils.arange(start, stop, step, dtype=np.int64)
return as_index(vals)
class GenericIndex(Index):
"""An array of orderable values that represent the indices of another Column
Attributes
---
_values: A Column object
name: A string
"""
def __init__(self, values, name=None):
from cudf.dataframe.series import Series
# normalize the input
if isinstance(values, Series):
name = values.name
values = values._column
elif isinstance(values, columnops.TypedColumnBase):
values = values
else:
values = NumericalColumn(data=Buffer(values), dtype=values.dtype)
assert isinstance(values, columnops.TypedColumnBase), type(values)
assert values.null_count == 0
self._values = values
self.name = name
def copy(self, deep=True):
        if deep:
result = deepcopy(self)
else:
result = copy(self)
result._values = self._values.copy(deep)
return result
def serialize(self, serialize):
header = {}
header['payload'], frames = serialize(self._values)
header['frame_count'] = len(frames)
return header, frames
@classmethod
def deserialize(cls, deserialize, header, frames):
payload = deserialize(header['payload'],
frames[:header['frame_count']])
return cls(payload)
def __sizeof__(self):
return self._values.__sizeof__()
def __reduce__(self):
return GenericIndex, tuple([self._values])
def __len__(self):
return len(self._values)
def __repr__(self):
vals = [self._values[i] for i in range(min(len(self), 10))]
return "{}({}, dtype={})".format(self.__class__.__name__,
vals, self._values.dtype)
def __getitem__(self, index):
res = self._values[index]
if not isinstance(index, int):
return as_index(res)
else:
return res
def as_column(self):
"""Convert the index as a Series.
"""
return self._values
@property
def dtype(self):
return self._values.dtype
def find_label_range(self, first, last):
"""Find range that starts with *first* and ends with *last*,
inclusively.
Returns
-------
begin, end : 2-tuple of int
The starting index and the ending index.
The *last* value occurs at ``end - 1`` position.
"""
col = self._values
begin, end = None, None
if first is not None:
begin = col.find_first_value(first)
if last is not None:
end = col.find_last_value(last)
end += 1
return begin, end
class DatetimeIndex(GenericIndex):
# TODO this constructor should take a timezone or something to be
# consistent with pandas
def __init__(self, values, name=None):
        # we should be more strict on what we accept here, but we'd have to
        # figure out all the semantics around pandas DatetimeIndex creation
        # first. For now, just make sure we handle np.datetime64 arrays
        # and then dispatch upstream
if isinstance(values, np.ndarray) and values.dtype.kind == 'M':
values = DatetimeColumn.from_numpy(values)
elif isinstance(values, pd.DatetimeIndex):
values = DatetimeColumn.from_numpy(values.values)
self._values = values
self.name = name
@property
def year(self):
return self.get_dt_field('year')
@property
def month(self):
return self.get_dt_field('month')
@property
def day(self):
return self.get_dt_field('day')
@property
def hour(self):
return self.get_dt_field('hour')
@property
def minute(self):
return self.get_dt_field('minute')
@property
def second(self):
return self.get_dt_field('second')
def get_dt_field(self, field):
out_column = self._values.get_dt_field(field)
# columnops.column_empty_like always returns a Column object
# but we need a NumericalColumn for GenericIndex..
# how should this be handled?
out_column = NumericalColumn(data=out_column.data,
mask=out_column.mask,
null_count=out_column.null_count,
dtype=out_column.dtype)
return as_index(out_column)
class CategoricalIndex(GenericIndex):
"""An categorical of orderable values that represent the indices of another
Column
Attributes
---
_values: A CategoricalColumn object
name: A string
"""
def __init__(self, values, name=None):
if isinstance(values, pd.Series) and \
pd.api.types.is_categorical_dtype(values.dtype):
values = CategoricalColumn(
data=Buffer(values.cat.codes.values),
categories=values.cat.categories.tolist(),
ordered=values.cat.ordered
)
elif isinstance(values, (pd.Categorical, pd.CategoricalIndex)):
values = CategoricalColumn(
data=Buffer(values.codes),
categories=values.categories.tolist(),
ordered=values.ordered
)
self._values = values
self.name = name
self.names = [name]
@property
def codes(self):
return self._values.codes
@property
def categories(self):
return self._values.categories
def as_index(arbitrary, name=None):
"""Create an Index from an arbitrary object
Currently supported inputs are:
* ``Column``
* ``Buffer``
* ``Series``
* ``Index``
* numba device array
* numpy array
* pyarrow array
* pandas.Categorical
Returns
-------
result : subclass of Index
- CategoricalIndex for Categorical input.
- DatetimeIndex for Datetime input.
- GenericIndex for all other inputs.
"""
# This function should probably be moved to Index.__new__
if isinstance(arbitrary, Index):
return arbitrary
elif isinstance(arbitrary, NumericalColumn):
return GenericIndex(arbitrary, name=name)
elif isinstance(arbitrary, DatetimeColumn):
return DatetimeIndex(arbitrary, name=name)
elif isinstance(arbitrary, CategoricalColumn):
return CategoricalIndex(arbitrary, name=name)
else:
name = None
if hasattr(arbitrary, 'name'):
name = arbitrary.name
if len(arbitrary) == 0:
return RangeIndex(0, 0, name=name)
return as_index(columnops.as_column(arbitrary), name=name)
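# Illustrative dispatch examples for as_index (added; hedged, not from the
# original file):
#
#     as_index(np.arange(3))                                      # GenericIndex
#     as_index(np.array(['2018-01-01'], dtype='datetime64[ms]'))  # DatetimeIndex
#     as_index(pd.Categorical(['a', 'b', 'a']))                   # CategoricalIndex
#     as_index(np.array([]))                                      # RangeIndex(0, 0)
#
# Columns are wrapped directly; anything else is first coerced through
# columnops.as_column and re-dispatched.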
register_distributed_serializer(RangeIndex)
register_distributed_serializer(GenericIndex)
register_distributed_serializer(DatetimeIndex)
register_distributed_serializer(CategoricalIndex)
| 29.560081
| 80
| 0.604864
|
354aed27aaf45f10dc8e31abd2d9dcc25cc404f7
| 1,960
|
py
|
Python
|
pytorch-frontend/caffe2/python/operator_test/margin_ranking_criterion_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 40
|
2021-06-01T07:37:59.000Z
|
2022-03-25T01:42:09.000Z
|
pytorch-frontend/caffe2/python/operator_test/margin_ranking_criterion_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 14
|
2021-06-01T11:52:46.000Z
|
2022-03-25T02:13:08.000Z
|
pytorch-frontend/caffe2/python/operator_test/margin_ranking_criterion_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 7
|
2021-07-20T19:34:26.000Z
|
2022-03-13T21:07:36.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestMarginRankingCriterion(serial.SerializedTestCase):
@given(N=st.integers(min_value=10, max_value=20),
seed=st.integers(min_value=0, max_value=65535),
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=1000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
"MarginRankingCriterion", ["X1", "X2", "Y"], ["loss"],
margin=margin)
def ref_cec(X1, X2, Y):
result = np.maximum(-Y * (X1 - X2) + margin, 0)
return (result, )
inputs = [X1, X2, Y]
# This checks the op implementation against a reference function in
# python.
self.assertReferenceChecks(gc, op, inputs, ref_cec)
# This checks the op implementation over multiple device options (e.g.
# CPU and CUDA). [0] means that the 0-th output is checked.
self.assertDeviceChecks(dc, op, inputs, [0])
# Make singular points less sensitive
X1[np.abs(margin - Y * (X1 - X2)) < 0.1] += 0.1
X2[np.abs(margin - Y * (X1 - X2)) < 0.1] -= 0.1
# Check dX1
self.assertGradientChecks(gc, op, inputs, 0, [0])
# Check dX2
self.assertGradientChecks(gc, op, inputs, 1, [0])
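# Quick numerical check of ref_cec (illustrative, not part of the original
# test): with margin = 0.1,
#
#     Y = +1, X1 = 0.5, X2 = 0.2  ->  max(-(0.5 - 0.2) + 0.1, 0) = 0.0
#     Y = -1, X1 = 0.5, X2 = 0.2  ->  max( (0.5 - 0.2) + 0.1, 0) = 0.4
#
# i.e. the loss vanishes once the correctly ranked pair clears the margin.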
if __name__ == "__main__":
import unittest
unittest.main()
| 36.296296
| 78
| 0.645408
|
1af64f9c6ef5279818e70fa94407c806945700c2
| 612
|
py
|
Python
|
CursoEmVideo/curso em video/ex69.py
|
elisio-ricardo/ExerciciosPythonCursoEmVideo
|
47a10b2118a76f4f95a762876ef9ab90e92f4fd3
|
[
"MIT"
] | null | null | null |
CursoEmVideo/curso em video/ex69.py
|
elisio-ricardo/ExerciciosPythonCursoEmVideo
|
47a10b2118a76f4f95a762876ef9ab90e92f4fd3
|
[
"MIT"
] | null | null | null |
CursoEmVideo/curso em video/ex69.py
|
elisio-ricardo/ExerciciosPythonCursoEmVideo
|
47a10b2118a76f4f95a762876ef9ab90e92f4fd3
|
[
"MIT"
] | null | null | null |
tot18 = toth = totm20 = 0
while True:
idade = int(input('Idade: '))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('SEXO: [M/F]')).strip().upper()[0]
if idade >= 18:
tot18 += 1
if sexo == 'M':
toth += 1
if sexo == 'F' and idade < 20:
totm20 += 1
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print(f'Total de pessoas com mais de 18 anos {tot18}')
print(f'Ao todo temos {toth} homens cadastrados')
print(f'Temos {totm20} mulheres com mais de 20 anos')
| 29.142857
| 69
| 0.539216
|
c9c7161908c72fc0a6cd2d30f8ed32088b41f056
| 499
|
py
|
Python
|
setup.py
|
mmbosschaert/pydelay
|
99f0890b6f80a4e3362270dfb367b3042e35f97a
|
[
"MIT"
] | 1
|
2019-07-10T02:26:37.000Z
|
2019-07-10T02:26:37.000Z
|
setup.py
|
mmbosschaert/pydelay
|
99f0890b6f80a4e3362270dfb367b3042e35f97a
|
[
"MIT"
] | null | null | null |
setup.py
|
mmbosschaert/pydelay
|
99f0890b6f80a4e3362270dfb367b3042e35f97a
|
[
"MIT"
] | 3
|
2017-07-20T15:30:06.000Z
|
2019-11-23T13:53:05.000Z
|
from distutils.core import setup
setup(name='pydelay',
version='0.1.99',
author='Valentin Flunkert',
author_email='flunkert@gmail.com',
license='MIT',
packages=['pydelay'],
package_dir={'pydelay': 'pydelay'},
package_data={'pydelay': ['doc/pyplots/*',
'doc/sphinxext/*',
'doc/Makefile',
'doc/conf.py',
'doc/README',
'doc/index.rst',
'doc/pydelay.pdf',
'examples/*.py']},
)
| 26.263158
| 49
| 0.519038
|
7d8843f697a1227b72fc1383cf1b8a74a4d20122
| 5,108
|
py
|
Python
|
dev/Tools/build/waf-1.7.13/lmbrwaflib/third_party_sync.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 8
|
2019-10-07T16:33:47.000Z
|
2020-12-07T03:59:58.000Z
|
dev/Tools/build/waf-1.7.13/lmbrwaflib/third_party_sync.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | null | null | null |
dev/Tools/build/waf-1.7.13/lmbrwaflib/third_party_sync.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 5
|
2020-08-27T20:44:18.000Z
|
2021-08-21T22:54:11.000Z
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from waflib import Utils
from waflib.Configure import conf, Logs
import os
import ConfigParser
import subprocess
# Config file values for the p4 sync configuration file
P4_SYNC_CONFIG_SECTION = "3rdParty" # The config section where the keys are stored under
P4_SYNC_CONFIG_EXECUTABLE = "p4_exe" # The value of the p4 executable (full path) on the local machine
P4_SYNC_CONFIG_HOST_PORT = "p4_port" # The host and port of the perforce server, ie perforce.com:1666
P4_SYNC_CONFIG_WORKSPACE = "p4_workspace" # The client workspace that has the 3rd party root mapped
P4_SYNC_CONFIG_REPOSITORY = "p4_repo" # The location of the repo in the perforce host that references the root of the 3rd party folder
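# For illustration (added; not part of the original file), a matching settings
# file would look roughly like the placeholder below; none of these values
# refer to a real host, path or workspace:
P4_SYNC_CONFIG_EXAMPLE = """
[3rdParty]
p4_exe = C:\\Program Files\\Perforce\\p4.exe
p4_port = perforce.example.com:1666
p4_workspace = my-3rdparty-workspace
p4_repo = //depot/3rdParty
"""
# ConfigParser reads these keys from the "3rdParty" section named above; the
# constant is illustrative only and is not referenced elsewhere in the module.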
class P4SyncThirdPartySettings:
"""
    This class manages the p4 settings used to sync any missing local 3rd party libraries from a Perforce
    repository.
"""
def __init__(self, local_3rd_party_root, config_file):
"""
        Initialize the sync settings object from a config file. This will also attempt to connect to the Perforce host
        using those settings, if any, to determine the local workspace root. If all goes well, the object
will be marked as valid (see is_valid())
"""
self.valid = False
if not os.path.exists(config_file):
return
try:
config_parser = ConfigParser.ConfigParser()
config_parser.read(config_file)
self.p4_exe = config_parser.get(P4_SYNC_CONFIG_SECTION, P4_SYNC_CONFIG_EXECUTABLE)
self.p4_port = config_parser.get(P4_SYNC_CONFIG_SECTION, P4_SYNC_CONFIG_HOST_PORT)
self.p4_workspace = config_parser.get(P4_SYNC_CONFIG_SECTION, P4_SYNC_CONFIG_WORKSPACE)
self.p4_repo = config_parser.get(P4_SYNC_CONFIG_SECTION, P4_SYNC_CONFIG_REPOSITORY)
except ConfigParser.Error as err:
Logs.warn('[WARN] Error reading p4 sync settings file {}. ({}). '
'Missing 3rd party libraries will not be synced from perforce'.format(config_file, str(err)))
return
try:
proc = subprocess.Popen([self.p4_exe, '-p', self.p4_port, 'client', '-o', self.p4_workspace],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in iter(proc.stdout.readline, b''):
if line.startswith("Root:"):
self.client_root = line.replace("Root:", "").strip()
break
except OSError as err:
Logs.warn('[WARN] Invalid values in the p4 sync settings file {}. ({}). '
'Missing 3rd party libraries will not be synced from perforce'.format(config_file, str(err)))
return
normalized_client_root = os.path.normcase(self.client_root)
normalized_3rd_party_root = os.path.normcase(local_3rd_party_root)
if not normalized_3rd_party_root.startswith(normalized_client_root):
Logs.warn('[WARN] Local 3rd Party root ({}) does not match the root from workspace "{}" ({})'
'Missing 3rd party libraries will not be synced from perforce'.format(local_3rd_party_root,
self.p4_workspace,
self.client_root))
return
self.valid = True
def is_valid(self):
return self.valid
def sync_3rd_party(self, third_party_subpath):
Logs.info("[INFO] Syncing library {} from perforce...".format(third_party_subpath))
try:
timer = Utils.Timer()
sync_proc = subprocess.Popen([self.p4_exe,
'-p', self.p4_port,
'-c', self.p4_workspace,
'sync',
'-f', "{}/{}/...".format(self.p4_repo, third_party_subpath.rstrip('/'))],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sync_out, sync_err = sync_proc.communicate()
if not sync_err:
Logs.info("[INFO] Library {} synced. ({})".format(third_party_subpath, str(timer)))
except OSError as err:
Logs.warn("[WARN] Unable to sync 3rd party path {}: {}".format(third_party_subpath, str(err)))
| 48.647619
| 140
| 0.608066
|
5a2e94eb896d3867684309b7eda0b162851d13fc
| 1,301
|
py
|
Python
|
python/Flask/Quizz-App/app.py
|
kuwarkapur/Hacktoberfest-2022
|
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
|
[
"MIT"
] | 1
|
2021-12-03T09:23:41.000Z
|
2021-12-03T09:23:41.000Z
|
python/Flask/Quizz-App/app.py
|
kuwarkapur/Hacktoberfest-2022
|
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
|
[
"MIT"
] | null | null | null |
python/Flask/Quizz-App/app.py
|
kuwarkapur/Hacktoberfest-2022
|
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
|
[
"MIT"
] | null | null | null |
from flask import Flask,render_template,request
from main import firebase
from flask import redirect
app = Flask(__name__)
db=firebase.database()
# Global variables
global i
i=0
points=0
data=db.child('quizz').child('questions').get()
@app.route('/')
def hello_world():
try:
global i
q=data.val()[i]
question=q['question']
option_a=q['answers'][0]
option_b=q['answers'][1]
option_c=q['answers'][2]
option_d=q['answers'][3]
return render_template('index.html',Question=question,Option_a=option_a,Option_b=option_b,Option_c=option_c,Option_d=option_d)
except:
return render_template('scoreboard.html',Points=points)
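# Shape of the Firebase data consumed above (illustrative; inferred from the
# lookups, not taken from the original project):
#
#     quizz/questions = [
#         {"question": "...", "answers": ["a", "b", "c", "d"], "correctIndex": 2},
#         ...
#     ]
#
# hello_world() walks this list with the module-level counter i and falls back
# to the scoreboard template once the index runs past the last question.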
@app.route('/action-submit',methods=['POST','GET'])
def submit():
try:
if request.method=='GET':
global i
selectedValue=request.args.get('answer')
index=data.val()[i]['correctIndex']
test=data.val()[i]['answers'][index]
print(test)
if(selectedValue==data.val()[i]['answers'][index]):
global points
points=points+1
i=i+1
return hello_world()
    except Exception as exc:
        return str(exc)
if __name__ == '__main__':
app.run()
| 26.55102
| 135
| 0.583397
|
304257ff552a1e1ec8eeb6a109a15c6bd03991e0
| 794
|
py
|
Python
|
apps/comment/admin.py
|
Moingpony/my_izone
|
bfcf870fe306a0d9483cb2c6eb6587e711c41914
|
[
"MIT"
] | null | null | null |
apps/comment/admin.py
|
Moingpony/my_izone
|
bfcf870fe306a0d9483cb2c6eb6587e711c41914
|
[
"MIT"
] | 9
|
2020-03-24T17:00:12.000Z
|
2022-03-11T23:44:52.000Z
|
apps/comment/admin.py
|
Moingpony/my_izone
|
bfcf870fe306a0d9483cb2c6eb6587e711c41914
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import ArticleComment, Notification
@admin.register(ArticleComment)
class CommentAdmin(admin.ModelAdmin):
date_hierarchy = 'create_date'
list_display = ('id', 'author', 'belong', 'create_date', 'show_content')
list_filter = ('author', 'belong',)
ordering = ('-id',)
    # Fields that should be rendered as links (wrapped in <a> tags)
list_display_links = ('id', 'show_content')
    # Use a method to define a custom field and give it a display name
def show_content(self, obj):
return obj.content[:30]
    show_content.short_description = 'Comment content'
@admin.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
date_hierarchy = 'create_date'
list_display = ('id', 'create_p', 'create_date', 'comment', 'is_read')
list_filter = ('create_p', 'is_read',)
| 30.538462
| 77
| 0.677582
|
cd8957fb41da3c0954c677428aed33063f9954cd
| 678
|
py
|
Python
|
Projetos/Ex9.py
|
rharcosta/python-projects
|
bafe6b2f7ebfd9591cff37a4d2936da210b804f5
|
[
"MIT"
] | null | null | null |
Projetos/Ex9.py
|
rharcosta/python-projects
|
bafe6b2f7ebfd9591cff37a4d2936da210b804f5
|
[
"MIT"
] | null | null | null |
Projetos/Ex9.py
|
rharcosta/python-projects
|
bafe6b2f7ebfd9591cff37a4d2936da210b804f5
|
[
"MIT"
] | null | null | null |
import numpy as np
# Read two lists and generate a third one without repeated elements
lista1 = []
lista2 = []
lista3 = []
print()
elementos = int(input('Quantos elementos você quer na listas? '))
for i in range (1, elementos + 1):
valor = int(input('Digite o {} valor para ser inserido na primeira lista: '.format(i)))
if valor == 0:
break
lista1.append(valor)
print('-' * 30)
for j in range (1, elementos + 1):
valor = int(input('Digite o {} valor para ser inserido na segunda lista: '.format(j)))
if valor == 0:
break
lista2.append(valor)
lista3 = np.unique(lista1 + lista2)
print('\nA soma das listas sem repetição é: {}\n'.format(lista3))
| 33.9
| 91
| 0.654867
|
47e4425d3473d73ce9452268a4c4bcac2d25de5a
| 1,251
|
py
|
Python
|
tests/functional/regressions/issue228/b_module.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 530
|
2019-06-04T11:45:36.000Z
|
2022-03-31T09:29:56.000Z
|
tests/functional/regressions/issue228/b_module.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 242
|
2019-06-04T11:53:08.000Z
|
2022-03-28T07:06:27.000Z
|
tests/functional/regressions/issue228/b_module.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 36
|
2019-06-21T06:40:27.000Z
|
2021-11-04T13:11:16.000Z
|
from typing import Any, Callable, Dict, Optional
from tartiflette import Directive, Resolver, Scalar
_SDL = """
scalar NinjaGo
directive @Blah on FIELD_DEFINITION
type Lol {
ninja: NinjaGo @Blah
}
"""
class BlahDir:
def __init__(self, config):
self._blah_value = config["val"]
async def on_field_execution(
self,
directive_args: Dict[str, Any],
next_resolver: Callable,
parent: Optional[Any],
args: Dict[str, Any],
ctx: Optional[Any],
info: "ResolveInfo",
):
return (
await next_resolver(parent, args, ctx, info)
+ " B"
+ self._blah_value
)
class NinjaGo:
@staticmethod
def coerce_output(val):
return val + "GO !B"
@staticmethod
def coerce_input(val):
return val
@staticmethod
def parse_literal(ast):
return ast.value
async def resolver_of_lol_ninja(pr, *_args, **_kwargs):
return pr["ninja"] + " NinjaB"
async def bake(schema_name, config):
Directive("Blah", schema_name=schema_name)(BlahDir(config))
Scalar("NinjaGo", schema_name=schema_name)(NinjaGo)
Resolver("Lol.ninja", schema_name=schema_name)(resolver_of_lol_ninja)
return _SDL
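# Resolution-order sketch (illustrative; inferred from the pieces above, not an
# assertion about tartiflette internals): for a parent value {"ninja": "Kai"}
# and config {"val": "lah"}, the field resolver runs first, the @Blah directive
# wraps it, and the NinjaGo output coercion is applied last, giving roughly
#
#     "Kai NinjaB"  ->  "Kai NinjaB Blah"  ->  "Kai NinjaB BlahGO !B"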
| 21.20339
| 73
| 0.633094
|
872507cb69d1ed7e0fc40038305ebddd4e50209c
| 121
|
py
|
Python
|
loadbar/__init__.py
|
ValentinVignal/LoadBar
|
aa951f4d123995adbf115c1f2a3555a7f11a2b56
|
[
"MIT"
] | 5
|
2020-05-27T09:23:15.000Z
|
2022-03-24T16:16:01.000Z
|
loadbar/__init__.py
|
ValentinVignal/LoadBar
|
aa951f4d123995adbf115c1f2a3555a7f11a2b56
|
[
"MIT"
] | null | null | null |
loadbar/__init__.py
|
ValentinVignal/LoadBar
|
aa951f4d123995adbf115c1f2a3555a7f11a2b56
|
[
"MIT"
] | null | null | null |
from .LoadBar import LoadBar
from .ColorBar import ColorBar
from .Colors import Colors
from .RainbowBar import RainbowBar
| 30.25
| 34
| 0.842975
|