Dataset schema (column, dtype, and value summary):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
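As a quick orientation, here is a sketch of loading one record of a dataset with this schema using the Hugging Face `datasets` library. The dataset path is a placeholder, since the source does not name the dataset.

```python
from datasets import load_dataset

# "org/code-dataset" is a placeholder path, not the actual dataset name.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Pull a single record and look at a few of the columns listed above.
first = next(iter(ds))
print(first["max_stars_repo_path"], first["size"], first["alphanum_fraction"])
```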
- hexsha: 03ebda09bbc22220b71fe43ac3d4374f579cc7d2
- size: 1,811
- ext: py
- lang: Python
- max_stars_repo_path: django_oac/middleware.py
- max_stars_repo_name: przemekk1385/django-oac
- max_stars_repo_head_hexsha: 379b29921551ea1d53edb5e3fbb6fa8d3c972acb
- max_stars_repo_licenses: ["MIT"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: django_oac/middleware.py
- max_issues_repo_name: przemekk1385/django-oac
- max_issues_repo_head_hexsha: 379b29921551ea1d53edb5e3fbb6fa8d3c972acb
- max_issues_repo_licenses: ["MIT"]
- max_issues_count: 3
- max_issues_repo_issues_event_min_datetime: 2022-03-02T18:08:13.000Z
- max_issues_repo_issues_event_max_datetime: 2022-03-02T18:29:33.000Z
- max_forks_repo_path: django_oac/middleware.py
- max_forks_repo_name: przemekk1385/django_oac
- max_forks_repo_head_hexsha: 379b29921551ea1d53edb5e3fbb6fa8d3c972acb
- max_forks_repo_licenses: ["MIT"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
from logging import Logger
from typing import Callable, Type
from django.contrib.auth import logout
from django.http.request import HttpRequest
from django.http.response import HttpResponseBase
from .conf import settings as oac_settings
from .decorators import populate_method_logger as populate_logger
from .exceptions import ProviderResponseError
from .models_providers.token_provider import TokenProviderBase
TokenProvider = oac_settings.TOKEN_PROVIDER_CLASS
# pylint: disable=too-few-public-methods
class OAuthClientMiddleware:
def __init__(
self,
get_response: Callable,
token_provider: TokenProviderBase = TokenProvider(),
) -> None:
self.get_response = get_response
self.token_provider = token_provider
@populate_logger
def __call__(self, request: HttpRequest, logger: Logger) -> Type[HttpResponseBase]:
user = request.user
if user.is_authenticated:
token = user.token_set.last()
if token and token.has_expired:
logger.info(f"access token for user '{user.email}' has expired")
try:
self.token_provider.refresh(token)
except ProviderResponseError as err:
logger.error(f"raised ProviderResponseError: {err}")
token.delete()
logout(request)
else:
logger.info(
f"access token for user '{user.email}' has been refreshed"
)
elif not token:
logger.info(f"no access token found for user '{user.email}'")
else:
logger.debug(f"access token for user '{user.email}' is valid")
response = self.get_response(request)
return response
- avg_line_length: 34.826923
- max_line_length: 87
- alphanum_fraction: 0.639978
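The middleware sample above relies on an authenticated `request.user`. A minimal sketch of how such a middleware is typically wired into a Django project's settings follows; the dotted path is inferred from the sample's module and class name, and the surrounding entries are ordinary Django defaults, not part of the dataset record.

```python
# settings.py (illustrative sketch; assumes the package is installed as "django_oac")
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    # Placed after AuthenticationMiddleware so request.user is populated
    # before OAuthClientMiddleware checks token expiry.
    "django_oac.middleware.OAuthClientMiddleware",
]
```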
- hexsha: a4d18b5435ff15ecdb80f9c4d06afdb9aea5da6e
- size: 265
- ext: py
- lang: Python
- max_stars_repo_path: where the magic happens/AddRow.py
- max_stars_repo_name: jmolloy1/instawizardry
- max_stars_repo_head_hexsha: 67923e6e44a835e5d319c0067259740172075959
- max_stars_repo_licenses: ["MIT"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: where the magic happens/AddRow.py
- max_issues_repo_name: jmolloy1/instawizardry
- max_issues_repo_head_hexsha: 67923e6e44a835e5d319c0067259740172075959
- max_issues_repo_licenses: ["MIT"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: where the magic happens/AddRow.py
- max_forks_repo_name: jmolloy1/instawizardry
- max_forks_repo_head_hexsha: 67923e6e44a835e5d319c0067259740172075959
- max_forks_repo_licenses: ["MIT"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
import sys
import csv
with open('Posts.csv','r') as postFile:
posts = csv.writer(postFile)
def AddRow(row):
with open('Posts.csv','w') as postFile:
posts = csv.writer(postFile)
print(row)
posts.writerow([row])
- avg_line_length: 17.666667
- max_line_length: 44
- alphanum_fraction: 0.573585
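The sample above opens Posts.csv for reading but builds a `csv.writer` on that handle, and `AddRow` re-opens the file in "w" mode, which truncates it on every call and writes the whole row as a single field. A corrected sketch of the apparent intent is shown below; the file name is kept from the sample, while append mode, newline handling, and the example row are assumptions.

```python
import csv

def add_row(row):
    """Append one row (a sequence of field values) to Posts.csv."""
    # "a" appends instead of truncating; newline="" avoids blank lines on Windows.
    with open("Posts.csv", "a", newline="") as post_file:
        writer = csv.writer(post_file)
        writer.writerow(row)

# Example usage with placeholder values.
add_row(["example_user", "example_caption", "2022-01-01"])
```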
- hexsha: 189d7c986094ef17d479bd5d3fc6425030a64fcc
- size: 1,414
- ext: py
- lang: Python
- max_stars_repo_path: awacs/lookoutvision.py
- max_stars_repo_name: mtrspringer/awacs
- max_stars_repo_head_hexsha: a5d2fe37c2a468a977536c4d6e66dda7da69717f
- max_stars_repo_licenses: ["BSD-2-Clause"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: awacs/lookoutvision.py
- max_issues_repo_name: mtrspringer/awacs
- max_issues_repo_head_hexsha: a5d2fe37c2a468a977536c4d6e66dda7da69717f
- max_issues_repo_licenses: ["BSD-2-Clause"]
- max_issues_count: 19
- max_issues_repo_issues_event_min_datetime: 2020-11-30T06:43:54.000Z
- max_issues_repo_issues_event_max_datetime: 2022-02-21T09:02:54.000Z
- max_forks_repo_path: awacs/lookoutvision.py
- max_forks_repo_name: mtrspringer/awacs
- max_forks_repo_head_hexsha: a5d2fe37c2a468a977536c4d6e66dda7da69717f
- max_forks_repo_licenses: ["BSD-2-Clause"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon Lookout for Vision'
prefix = 'lookoutvision'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CreateDataset = Action('CreateDataset')
CreateModel = Action('CreateModel')
CreateProject = Action('CreateProject')
DeleteDataset = Action('DeleteDataset')
DeleteModel = Action('DeleteModel')
DeleteProject = Action('DeleteProject')
DescribeDataset = Action('DescribeDataset')
DescribeModel = Action('DescribeModel')
DescribeProject = Action('DescribeProject')
DescribeTrialDetection = Action('DescribeTrialDetection')
DetectAnomalies = Action('DetectAnomalies')
ListDatasetEntries = Action('ListDatasetEntries')
ListModels = Action('ListModels')
ListProjects = Action('ListProjects')
ListTrialDetections = Action('ListTrialDetections')
StartModel = Action('StartModel')
StartTrialDetection = Action('StartTrialDetection')
StopModel = Action('StopModel')
UpdateDatasetEntries = Action('UpdateDatasetEntries')
- avg_line_length: 31.422222
- max_line_length: 70
- alphanum_fraction: 0.750354
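The lookoutvision module above only defines action-name constants under the "lookoutvision" prefix; how they end up in a policy is left to the caller. A minimal sketch that assembles a plain IAM policy document from a few of those action names follows; the dictionary layout and the chosen actions are illustrative and not part of the sample.

```python
import json

# Action names taken from the module above, rendered as "prefix:Action",
# which is the form IAM policy statements expect.
prefix = "lookoutvision"
actions = ["DetectAnomalies", "StartModel", "StopModel"]

policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [f"{prefix}:{name}" for name in actions],
            "Resource": "*",
        }
    ],
}

print(json.dumps(policy, indent=2))
```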
- hexsha: eeb19140e302eedae6b5ab49a599484691fcb244
- size: 17,718
- ext: py
- lang: Python
- max_stars_repo_path: sdk/python/pulumi_azure_nextgen/network/v20190901/security_rule.py
- max_stars_repo_name: pulumi/pulumi-azure-nextgen
- max_stars_repo_head_hexsha: 452736b0a1cf584c2d4c04666e017af6e9b2c15c
- max_stars_repo_licenses: ["Apache-2.0"]
- max_stars_count: 31
- max_stars_repo_stars_event_min_datetime: 2020-09-21T09:41:01.000Z
- max_stars_repo_stars_event_max_datetime: 2021-02-26T13:21:59.000Z
- max_issues_repo_path: sdk/python/pulumi_azure_nextgen/network/v20190901/security_rule.py
- max_issues_repo_name: pulumi/pulumi-azure-nextgen
- max_issues_repo_head_hexsha: 452736b0a1cf584c2d4c04666e017af6e9b2c15c
- max_issues_repo_licenses: ["Apache-2.0"]
- max_issues_count: 231
- max_issues_repo_issues_event_min_datetime: 2020-09-21T09:38:45.000Z
- max_issues_repo_issues_event_max_datetime: 2021-03-01T11:16:03.000Z
- max_forks_repo_path: sdk/python/pulumi_azure_nextgen/network/v20190901/security_rule.py
- max_forks_repo_name: pulumi/pulumi-azure-nextgen
- max_forks_repo_head_hexsha: 452736b0a1cf584c2d4c04666e017af6e9b2c15c
- max_forks_repo_licenses: ["Apache-2.0"]
- max_forks_count: 4
- max_forks_repo_forks_event_min_datetime: 2020-09-29T14:14:59.000Z
- max_forks_repo_forks_event_max_datetime: 2021-02-10T20:38:16.000Z
- content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SecurityRule']
class SecurityRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'SecurityRuleAccess']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
destination_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
direction: Optional[pulumi.Input[Union[str, 'SecurityRuleDirection']]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_security_group_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[Union[str, 'SecurityRuleProtocol']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Network security rule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'SecurityRuleAccess']] access: The network traffic is allowed or denied.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] destination_application_security_groups: The application security group specified as destination.
:param pulumi.Input[str] destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
:param pulumi.Input[Union[str, 'SecurityRuleDirection']] direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] network_security_group_name: The name of the network security group.
:param pulumi.Input[int] priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param pulumi.Input[Union[str, 'SecurityRuleProtocol']] protocol: Network protocol this rule applies to.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] security_rule_name: The name of the security rule.
:param pulumi.Input[str] source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: The CIDR or source IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] source_application_security_groups: The application security group specified as source.
:param pulumi.Input[str] source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if access is None and not opts.urn:
raise TypeError("Missing required property 'access'")
__props__['access'] = access
__props__['description'] = description
__props__['destination_address_prefix'] = destination_address_prefix
__props__['destination_address_prefixes'] = destination_address_prefixes
__props__['destination_application_security_groups'] = destination_application_security_groups
__props__['destination_port_range'] = destination_port_range
__props__['destination_port_ranges'] = destination_port_ranges
if direction is None and not opts.urn:
raise TypeError("Missing required property 'direction'")
__props__['direction'] = direction
__props__['id'] = id
__props__['name'] = name
if network_security_group_name is None and not opts.urn:
raise TypeError("Missing required property 'network_security_group_name'")
__props__['network_security_group_name'] = network_security_group_name
__props__['priority'] = priority
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['security_rule_name'] = security_rule_name
__props__['source_address_prefix'] = source_address_prefix
__props__['source_address_prefixes'] = source_address_prefixes
__props__['source_application_security_groups'] = source_application_security_groups
__props__['source_port_range'] = source_port_range
__props__['source_port_ranges'] = source_port_ranges
__props__['etag'] = None
__props__['provisioning_state'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/latest:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150615:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160330:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20161201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200501:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200801:SecurityRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityRule, __self__).__init__(
'azure-nextgen:network/v20190901:SecurityRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityRule':
"""
Get an existing SecurityRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return SecurityRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def access(self) -> pulumi.Output[str]:
"""
The network traffic is allowed or denied.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@property
@pulumi.getter(name="destinationApplicationSecurityGroups")
def destination_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as destination.
"""
return pulumi.get(self, "destination_application_security_groups")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@property
@pulumi.getter
def direction(self) -> pulumi.Output[str]:
"""
The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[int]]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Network protocol this rule applies to.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the security rule resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@property
@pulumi.getter(name="sourceApplicationSecurityGroups")
def source_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as source.
"""
return pulumi.get(self, "source_application_security_groups")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
- avg_line_length: 60.061017
- max_line_length: 2,495
- alphanum_fraction: 0.697313
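As a usage illustration of the generated resource class above, here is a sketch of instantiating it inside a Pulumi program with the properties its `__init__` treats as required when creating (access, direction, protocol, resource_group_name, network_security_group_name). The import path follows the sample's module layout, and the resource group, NSG name, and rule values are placeholders rather than values from the dataset.

```python
# Illustrative only; this runs inside a Pulumi program ("pulumi up") with the
# Azure NextGen provider configured. All names and values below are placeholders.
from pulumi_azure_nextgen.network.v20190901 import SecurityRule

allow_https = SecurityRule(
    "allow-https-inbound",
    resource_group_name="example-rg",
    network_security_group_name="example-nsg",
    security_rule_name="allow-https-inbound",
    access="Allow",        # required when creating (not looking up by URN)
    direction="Inbound",   # required
    protocol="Tcp",        # required
    priority=100,
    source_address_prefix="*",
    source_port_range="*",
    destination_address_prefix="*",
    destination_port_range="443",
)
```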
- hexsha: 26077653b49bee3790983a87e9f462693160f6c9
- size: 77,975
- ext: py
- lang: Python
- max_stars_repo_path: cctbx/maptbx/__init__.py
- max_stars_repo_name: toastisme/cctbx_project
- max_stars_repo_head_hexsha: d1a25147b5958822b6923fb55260749ccf9350ff
- max_stars_repo_licenses: ["BSD-3-Clause-LBNL"]
- max_stars_count: 2
- max_stars_repo_stars_event_min_datetime: 2018-02-01T14:25:48.000Z
- max_stars_repo_stars_event_max_datetime: 2021-09-15T16:36:29.000Z
- max_issues_repo_path: cctbx/maptbx/__init__.py
- max_issues_repo_name: toastisme/cctbx_project
- max_issues_repo_head_hexsha: d1a25147b5958822b6923fb55260749ccf9350ff
- max_issues_repo_licenses: ["BSD-3-Clause-LBNL"]
- max_issues_count: 2
- max_issues_repo_issues_event_min_datetime: 2018-06-14T17:04:17.000Z
- max_issues_repo_issues_event_max_datetime: 2019-06-24T20:54:12.000Z
- max_forks_repo_path: cctbx/maptbx/__init__.py
- max_forks_repo_name: toastisme/cctbx_project
- max_forks_repo_head_hexsha: d1a25147b5958822b6923fb55260749ccf9350ff
- max_forks_repo_licenses: ["BSD-3-Clause-LBNL"]
- max_forks_count: 1
- max_forks_repo_forks_event_min_datetime: 2022-02-08T10:11:07.000Z
- max_forks_repo_forks_event_max_datetime: 2022-02-08T10:11:07.000Z
- content:
from __future__ import absolute_import, division, print_function
import cctbx.sgtbx
import boost_adaptbx.boost.python as bp
from six.moves import range
from six.moves import zip
ext = bp.import_ext("cctbx_maptbx_ext")
from cctbx_maptbx_ext import *
from cctbx import crystal
from cctbx import sgtbx
from cctbx.array_family import flex
from scitbx import matrix
from scitbx.python_utils import dicts
from libtbx import adopt_init_args
from libtbx.utils import Sorry
import libtbx.load_env
import math
import sys, os
import scitbx.math
from cctbx import adptbx
from libtbx import group_args
from scitbx import fftpack
from libtbx.test_utils import approx_equal
from cctbx import uctbx
import scitbx.math
debug_peak_cluster_analysis = os.environ.get(
"CCTBX_MAPTBX_DEBUG_PEAK_CLUSTER_ANALYSIS", "")
@bp.inject_into(connectivity)
class _():
def get_blobs_boundaries_tuples(self):
"""
get lists of minimum and maximum coordinates for each connected
region.
returns 2 lists of tuples: first is minimum, second is maximum coordinates.
[(x0, y0, z0), (x1, y1, z1), ...] where 0, 1, ... - number of region
"""
boundaries = self.get_blobs_boundaries()
regs = self.regions()
min_boundaries = []
max_boundaries = []
for i in range(len(regs)):
minb = (boundaries[0, i, 0], boundaries[0, i, 1], boundaries[0, i, 2])
maxb = (boundaries[1, i, 0], boundaries[1, i, 1], boundaries[1, i, 2])
min_boundaries.append(minb)
max_boundaries.append(maxb)
return min_boundaries, max_boundaries
def smooth_map(map, crystal_symmetry, rad_smooth, method = "exp",
non_negative = True):
from cctbx import miller
assert method in ["exp", "box_average"]
map_smooth = None
if(method == "exp"):
f_map = miller.structure_factor_box_from_map(
map = map,
crystal_symmetry = crystal_symmetry,
include_000 = True)
ddd = f_map.d_spacings().data()
ddd.set_selected(ddd == -1 , 1.e+10) # d_spacing for (0, 0, 0) was set to -1
ss = 1./flex.pow2(ddd) / 4.
b_smooth = 8*math.pi**2*rad_smooth**2
smooth_scale = flex.exp(-b_smooth*ss)
f_map = f_map.array(data = f_map.data()*smooth_scale)
cg = crystal_gridding(
unit_cell = crystal_symmetry.unit_cell(),
space_group_info = crystal_symmetry.space_group_info(),
pre_determined_n_real = map.all())
fft_map = miller.fft_map(
crystal_gridding = cg,
fourier_coefficients = f_map)
fft_map.apply_volume_scaling()
map_smooth = fft_map.real_map_unpadded()
if non_negative:
map_smooth = map_smooth.set_selected(map_smooth<0., 0)
elif(method == "box_average"): # assume 0/1 binary map
assert abs(flex.max(map)-1.)<1.e-6
mmin = flex.min(map)
assert mmin<1.e-6 and mmin>= 0.0
map_smooth = map.deep_copy()
for i in range(3):
maptbx.map_box_average(
map_data = map_smooth,
index_span = 1)
for i in range(3):
maptbx.map_box_average(
map_data = map_smooth,
cutoff = 0.99,
index_span = 1)
return map_smooth
class d99(object):
def __init__(self, map = None, f_map = None, crystal_symmetry = None):
adopt_init_args(self, locals())
if(map is not None):
assert f_map is None
assert crystal_symmetry is not None
map = shift_origin_if_needed(map_data = map).map_data
from cctbx import miller
self.f_map = miller.structure_factor_box_from_map(
map = map, crystal_symmetry = crystal_symmetry)
else:
assert [map, crystal_symmetry].count(None) == 2
self.d_spacings = self.f_map.d_spacings().data()
self.d_max, self.d_min = flex.max(self.d_spacings), flex.min(self.d_spacings)
o = ext.d99(
f = self.f_map.data(),
d_spacings = self.d_spacings,
hkl = self.f_map.indices(),
cutoff = 0.99)
self.result = group_args(
d99 = o.d_min())
def show(self, log):
fmt = "%12.6f %8.6f"
for d_min, cc in zip(self.result.d_mins, self.result.ccs):
print(fmt%(d_min, cc), file = log)
def assert_same_gridding(map_1, map_2,
Sorry_message = "Maps have different gridding."):
f1 = map_1.focus() == map_2.focus()
f2 = map_1.origin() == map_2.origin()
f3 = map_1.all() == map_2.all()
if([f1, f2, f3].count(True)!= 3):
raise Sorry(Sorry_message)
def shift_origin_if_needed(map_data = None,
sites_cart = None,
crystal_symmetry = None,
ncs_object = None,
origin_grid_units = None,
n_xyz = None,
):
if not map_data:
assert origin_grid_units and n_xyz
shift_needed = True
else: # usual
shift_needed = not \
(map_data.focus_size_1d() > 0 and map_data.nd() == 3 and
map_data.is_0_based())
shift_frac = None
shift_cart = None
if(shift_needed):
if map_data:
N = map_data.all()
O = map_data.origin()
map_data = map_data.shift_origin()
else:
N = n_xyz
O = origin_grid_units
original_origin_grid_units = O
original_origin_cart = (0, 0, 0)
if crystal_symmetry:
if(not crystal_symmetry.space_group().type().number() in [0, 1]):
raise Sorry("Space groups other than P1 are not supported.")
a, b, c = crystal_symmetry.unit_cell().parameters()[:3]
fm = crystal_symmetry.unit_cell().fractionalization_matrix()
sx, sy, sz = O[0]/N[0], O[1]/N[1], O[2]/N[2]
shift_frac = [-sx, -sy, -sz]
shift_cart = crystal_symmetry.unit_cell().orthogonalize(shift_frac)
original_origin_cart = tuple(-matrix.col(shift_cart))
if(sites_cart is not None):
sites_cart = sites_cart + flex.vec3_double(sites_cart.size(), shift_cart)
if ncs_object:
ncs_object = ncs_object.deep_copy(coordinate_offset = shift_cart)
else:
original_origin_grid_units = None
original_origin_cart = None
else:
original_origin_grid_units = (0, 0, 0)
original_origin_cart = (0, 0, 0)
return group_args(
map_data = map_data,
ncs_object = ncs_object,
sites_cart = sites_cart,
shift_frac = shift_frac,
shift_cart = shift_cart,
original_origin_grid_units = original_origin_grid_units,
original_origin_cart = original_origin_cart)
def value_at_closest_grid_point(map, x_frac):
return map[closest_grid_point(map.accessor(), x_frac)]
flex.int.value_at_closest_grid_point = value_at_closest_grid_point
flex.double.value_at_closest_grid_point = value_at_closest_grid_point
flex.double.eight_point_interpolation = eight_point_interpolation
flex.double.eight_point_interpolation_with_gradients = \
eight_point_interpolation_with_gradients
flex.double.quadratic_interpolation_with_gradients = \
quadratic_interpolation_with_gradients
flex.double.tricubic_interpolation = tricubic_interpolation
flex.double.tricubic_interpolation_with_gradients = tricubic_interpolation_with_gradients
def cc_peak(cutoff, map_1 = None, map_2 = None, map_coeffs_1 = None, map_coeffs_2 = None):
"""
Compute CCpeak as described in
Acta Cryst. (2014). D70, 2593-2606
Metrics for comparison of crystallographic maps
A. Urzhumtsev, P. V. Afonine, V. Y. Lunin, T. C. Terwilliger and P. D. Adams
"""
from cctbx import miller
assert [map_1, map_2].count(None) in [0, 2]
assert [map_coeffs_1, map_coeffs_2].count(None) in [0, 2]
if([map_1, map_2].count(None) == 0):
# Maps are assumed to be quantile rank scaled (HE).
return ext.cc_peak(map_1 = map_1, map_2 = map_2, cutoff = cutoff)
elif([map_coeffs_1, map_coeffs_2].count(None) == 0):
d_min = min(map_coeffs_1.d_min(), map_coeffs_2.d_min())
crystal_gridding = map_coeffs_1.crystal_gridding(
d_min = d_min,
symmetry_flags = use_space_group_symmetry,
resolution_factor = 0.25)
fft_map = miller.fft_map(
crystal_gridding = crystal_gridding,
fourier_coefficients = map_coeffs_1)
map_1 = fft_map.real_map_unpadded()
fft_map = miller.fft_map(
crystal_gridding = crystal_gridding,
fourier_coefficients = map_coeffs_2)
map_2 = fft_map.real_map_unpadded()
m1_he = volume_scale(map = map_1, n_bins = 10000).map_data()
m2_he = volume_scale(map = map_2, n_bins = 10000).map_data()
return ext.cc_peak(map_1 = m1_he, map_2 = m2_he, cutoff = cutoff)
else:
raise Sorry("Combination of inputs not supported.")
def map_accumulator(n_real, use_max_map, smearing_b = 5, max_peak_scale = 2,
smearing_span = 10, use_exp_table = True):
"""
Good defaults for 2mFo-DFc type maps:
smearing_b = 1, max_peak_scale = 100, smearing_span = 5
"""
return ext.map_accumulator(n_real = n_real, smearing_b = smearing_b,
max_peak_scale = max_peak_scale, smearing_span = smearing_span,
use_exp_table = use_exp_table, use_max_map = use_max_map)
def peak_volume_estimate(map_data, sites_cart, crystal_symmetry, cutoff,
atom_radius = 1.5):
v = flex.double()
sites_frac = crystal_symmetry.unit_cell().fractionalize(sites_cart)
for sc, sf in zip(sites_cart, sites_frac):
if(map_data.value_at_closest_grid_point(sf)>= cutoff):
sel = grid_indices_around_sites(
unit_cell = crystal_symmetry.unit_cell(),
fft_n_real = map_data.focus(),
fft_m_real = map_data.all(),
sites_cart = flex.vec3_double([sc]),
site_radii = flex.double([atom_radius]*1))
v.append((map_data.select(sel)>= cutoff).count(True))
r = flex.min_default(v, None)
if(r == 0): return None
return r
def truncate(map_data, by_sigma_less_than, scale_by, set_value = 0):
"""
Trunate map inplace by standard deviation (sigma) while scale it with
specified scale, such as volume (scale_by = 1/volume) or sigma
(scale_by = 1/standard_deviation). Input map_data is expected to be unscaled (
right out of FT).
"""
sigma = statistics(map_data).sigma()
if(sigma == 0):
map_data = map_data*scale_by
return
ext.truncate(
map_data = map_data,
standard_deviation = sigma,
by_sigma_less_than = by_sigma_less_than,
scale_by = scale_by,
set_value = set_value)
def mask(xray_structure,
n_real,
mask_value_inside_molecule = 0,
mask_value_outside_molecule = 1,
solvent_radius = 0,
atom_radius = None):
xrs_p1 = xray_structure.expand_to_p1(sites_mod_positive = True)
if(atom_radius is None):
from cctbx.masks import vdw_radii_from_xray_structure
atom_radii = vdw_radii_from_xray_structure(xray_structure = xrs_p1)
else:
atom_radii = flex.double(xrs_p1.scatterers().size(), atom_radius)
return ext.mask(
sites_frac = xrs_p1.sites_frac(),
unit_cell = xrs_p1.unit_cell(),
n_real = n_real,
mask_value_inside_molecule = mask_value_inside_molecule,
mask_value_outside_molecule = mask_value_outside_molecule,
radii = atom_radii + solvent_radius)
class statistics(ext.statistics):
def __init__(self, map):
ext.statistics.__init__(self, map)
@bp.inject_into(ext.statistics)
class _():
def show_summary(self, f = None, prefix = ""):
if (f is None): f = sys.stdout
print(prefix + "max %.6g" % (self.max()), file = f)
print(prefix + "min %.6g" % (self.min()), file = f)
print(prefix + "mean %.6g" % (self.mean()), file = f)
print(prefix + "sigma %.6g" % (self.sigma()), file = f)
use_space_group_symmetry = sgtbx.search_symmetry_flags(
use_space_group_symmetry = True)
@bp.inject_into(ext.histogram)
class _():
"""
Injector for extending cctbx.maptbx.histogram
"""
# XXX make a method of scitbx
def get_percentile_cutoffs(self, map, vol_cutoff_plus_percent,
vol_cutoff_minus_percent):
"""
For the double-step filtration in cctbx.miller (used as part of the
procedure for replacing missing F-obs in maps), we need to calculate upper
and lower cutoffs for the data based on percentile values. This can be
done in just a few lines of code by using flex.sort_permutation over the
entire map, but this has a huge memory overhead (and possibly computational
overhead as well). Since we are only interested in subsets of values at
the extreme ends of the distribution, we can perform the sort for these
subsets instead, which should cut down on memory use.
Returns the upper and lower map value cutoffs (as Python floats).
"""
map_values = map.as_1d()
size = map_values.size()
# upper limit
i_bin_plus = -1
for i_bin, value in enumerate(self.v_values()):
if ((value*100) <= vol_cutoff_plus_percent):
i_bin_plus = i_bin - 1
break
assert (i_bin_plus >= 0)
cutoffp_lower_limit = self.arguments()[i_bin_plus]
top_values = map_values.select(map_values >= cutoffp_lower_limit)
i_upper = min(int(size * (vol_cutoff_plus_percent / 100.)),
top_values.size())
s = flex.sort_permutation(top_values)
top_values_sorted = top_values.select(s)
del s
assert (top_values_sorted.size() >= i_upper)
cutoffp = top_values_sorted[-i_upper]
del top_values
del top_values_sorted
# lower limit
i_bin_minus = -1
for i_bin, value in enumerate(self.c_values()):
if ((value*100) > vol_cutoff_minus_percent):
i_bin_minus = i_bin
break
assert (i_bin_minus >= 0)
cutoffm_upper_limit = self.arguments()[i_bin_minus]
bottom_values = map_values.select(map_values <= cutoffm_upper_limit)
i_lower = min(int(size * (vol_cutoff_minus_percent / 100.)),
bottom_values.size() - 1)
s = flex.sort_permutation(bottom_values)
bottom_values_sorted = bottom_values.select(s)
del s
assert (bottom_values_sorted.size() > i_lower)
cutoffm = bottom_values_sorted[i_lower]
del bottom_values
del bottom_values_sorted
return cutoffp, cutoffm
class peak_list(ext.peak_list):
def __init__(self, data,
tags,
peak_search_level = 1,
max_peaks = 0,
peak_cutoff = None,
interpolate = True):
if (peak_cutoff is None):
ext.peak_list.__init__(self,
data, tags, peak_search_level, max_peaks, interpolate)
else:
ext.peak_list.__init__(self,
data, tags, peak_search_level, peak_cutoff, max_peaks, interpolate)
def as_CObjectZYX(map_unit_cell, first, last, apply_sigma_scaling = True):
return ext.as_CObjectZYX(map_unit_cell, first, last, apply_sigma_scaling)
structure_factors = dicts.easy(
to_map = structure_factors_to_map,
from_map = structure_factors_from_map)
class crystal_gridding(object):
def __init__(self, unit_cell,
d_min = None,
resolution_factor = None,
step = None,
symmetry_flags = None,
space_group_info = None,
mandatory_factors = None,
max_prime = 5,
assert_shannon_sampling = True,
pre_determined_n_real = None):
if (pre_determined_n_real is None):
assert [d_min, step].count(None) == 1
if (step is not None):
d_min = step*2
resolution_factor = 0.5
elif (resolution_factor is None):
resolution_factor = 1/3
if (symmetry_flags is not None): assert space_group_info is not None
if (mandatory_factors is None): mandatory_factors = (1, 1, 1)
assert len(mandatory_factors) == 3
else:
assert d_min is None
assert step is None
assert mandatory_factors is None
adopt_init_args(self, locals(), hide = True)
if (pre_determined_n_real is not None):
self._n_real = pre_determined_n_real
elif (symmetry_flags is not None):
self._n_real = determine_gridding(
unit_cell, d_min, resolution_factor,
symmetry_flags, space_group_info.type(),
mandatory_factors, max_prime, assert_shannon_sampling)
else:
self._n_real = determine_gridding(
unit_cell, d_min, resolution_factor,
mandatory_factors, max_prime, assert_shannon_sampling)
def _copy_constructor(self, other):
self._unit_cell = other._unit_cell
self._d_min = other._d_min
self._resolution_factor = other._resolution_factor
self._symmetry_flags = other._symmetry_flags
self._space_group_info = other._space_group_info
self._mandatory_factors = other._mandatory_factors
self._max_prime = other._max_prime
self._n_real = other._n_real
def unit_cell(self):
return self._unit_cell
def d_min(self):
return self._d_min
def resolution_factor(self):
return self._resolution_factor
def symmetry_flags(self):
return self._symmetry_flags
def space_group_info(self):
return self._space_group_info
def change_space_group(self, space_group_info):
assert (space_group_info.group().refine_gridding(self.n_real())
== self.n_real())
self._space_group_info = space_group_info
def mandatory_factors(self):
return self._mandatory_factors
def max_prime(self):
return self._max_prime
def n_real(self):
return self._n_real
def space_group(self):
assert self.space_group_info() is not None
return self.space_group_info().group()
def crystal_symmetry(self):
assert self.space_group_info() is not None
return crystal.symmetry(
unit_cell = self.unit_cell(),
space_group_info = self.space_group_info())
def n_grid_points(self):
result = 1
for n in self.n_real():
result *= n
return result
def tags(self):
return crystal_gridding_tags(self)
class crystal_gridding_tags(crystal_gridding):
def __init__(self, gridding):
crystal_gridding._copy_constructor(self, gridding)
assert gridding.symmetry_flags() is not None
self._tags = grid_tags(dim = self.n_real())
self._tags.build(
space_group_type = self.space_group_info().type(),
symmetry_flags = self.symmetry_flags())
assert self._tags.n_grid_misses() == 0
def tags(self):
return self._tags
def peak_search(self, parameters, map, verify_symmetry = True):
if (parameters is None):
parameters = peak_search_parameters()
if (verify_symmetry and libtbx.env.full_testing):
assert self._tags.verify(map)
if (map.accessor().is_padded()):
map = copy(map, flex.grid(map.focus()))
grid_peaks = peak_list(
data = map,
tags = self._tags.tag_array(),
peak_search_level = parameters.peak_search_level(),
max_peaks = parameters.max_peaks(),
peak_cutoff = parameters.peak_cutoff(),
interpolate = parameters.interpolate())
if (parameters.min_distance_sym_equiv() is None):
return grid_peaks
return peak_cluster_analysis(
peak_list = grid_peaks,
special_position_settings = crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry(),
min_distance_sym_equiv = parameters.min_distance_sym_equiv()),
general_positions_only = parameters.general_positions_only(),
effective_resolution = parameters.effective_resolution(),
significant_height_fraction = parameters.significant_height_fraction(),
cluster_height_fraction = parameters.cluster_height_fraction(),
min_cross_distance = parameters.min_cross_distance(),
max_clusters = parameters.max_clusters(),
min_cubicle_edge = parameters.min_cubicle_edge())
class boxes_by_dimension(object):
def __init__(self,
n_real,
abc,
dim,
log = None,
prefix = ""):
self.n_real = n_real
#
step_1 = abc[0]/n_real[0] # step size along edge
step_2 = abc[1]/n_real[1] # step size along edge
step_3 = abc[2]/n_real[2] # step size along edge
i_step_1 = int(dim/step_1) # points per box edge
i_step_2 = int(dim/step_2) # points per box edge
i_step_3 = int(dim/step_3) # points per box edge
#
n_boxes = self._generate_boxes(i_step_1, i_step_2, i_step_3)
assert n_boxes == len(self.starts)
def _generate_boxes(self, ba, bb, bc):
def regroup(be):
maxe = be[len(be)-1][1]
step = int(maxe/len(be))
result = []
for i in range(len(be)):
if(i == 0):
l = 0
r = step
elif(i == len(be)-1):
l = i*step
r = maxe
else:
l = i*step
r = (i+1)*step
result.append([l, r])
return result
be = []
for i, b in enumerate([ba, bb, bc]):
be_ = self._box_edges(n_real_1d = self.n_real[i], step = b)
be_ = regroup(be_)
be.append(be_)
self.starts = []
self.ends = []
for i in be[0]:
for j in be[1]:
for k in be[2]:
self.starts.append([i[0], j[0], k[0]])
self.ends.append([i[1], j[1], k[1]])
return len(self.starts)
def _box_edges(self, n_real_1d, step):
limits = []
for i in range(0, n_real_1d, step): limits.append(i)
limits.append(n_real_1d)
box_1d = []
for i in range(len(limits)):
if(i == 0): box_1d.append([limits[0], limits[1]])
elif(i!= len(limits)-1): box_1d.append([limits[i], limits[i+1]])
return box_1d
class boxes(object):
"""
Split box defined by n_real into boxes where each box is a fraction of the
whole box.
"""
def __init__(self,
n_real,
fraction = None,
log = None,
max_boxes = 2000,
prefix = ""):
self.n_real = n_real
i = 0
n_boxes = 1.e+9
n_boxes_ = []
while n_boxes>max_boxes:
ba, bb, bc = \
min(10+i, max(3, int(n_real[0]*fraction))), \
min(10+i, max(3, int(n_real[1]*fraction))), \
min(10+i, max(3, int(n_real[2]*fraction)))
n_boxes = self._generate_boxes(ba, bb, bc)
if(n_boxes_.count(n_boxes)>3): break
n_boxes_.append(n_boxes)
i += 1
assert n_boxes == len(self.starts)
if(log):
print(prefix, "n1, n2, n3 (n_real) :", n_real, file = log)
print(prefix, "points per box edge:", ba, bb, bc, file = log)
print(prefix, "number of boxes :", len(self.starts), file = log)
def _generate_boxes(self, ba, bb, bc):
def regroup(be):
maxe = be[len(be)-1][1]
step = int(maxe/len(be))
result = []
for i in range(len(be)):
if(i == 0):
l = 0
r = step
elif(i == len(be)-1):
l = i*step
r = maxe
else:
l = i*step
r = (i+1)*step
result.append([l, r])
return result
be = []
for i, b in enumerate([ba, bb, bc]):
be_ = self._box_edges(n_real_1d = self.n_real[i], step = b)
be_ = regroup(be_)
be.append(be_)
self.starts = []
self.ends = []
for i in be[0]:
for j in be[1]:
for k in be[2]:
self.starts.append([i[0], j[0], k[0]])
self.ends.append([i[1], j[1], k[1]])
return len(self.starts)
def _box_edges(self, n_real_1d, step):
limits = []
for i in range(0, n_real_1d, step): limits.append(i)
limits.append(n_real_1d)
box_1d = []
for i in range(len(limits)):
if(i == 0): box_1d.append([limits[0], limits[1]])
elif(i!= len(limits)-1): box_1d.append([limits[i], limits[i+1]])
return box_1d
class peak_search_parameters(object):
def __init__(self, peak_search_level = 1,
max_peaks = 0,
peak_cutoff = None,
interpolate = True,
min_distance_sym_equiv = None,
general_positions_only = False,
effective_resolution = None,
significant_height_fraction = None,
cluster_height_fraction = None,
min_cross_distance = None,
max_clusters = None,
min_cubicle_edge = 5):
adopt_init_args(self, locals(), hide = True)
def _copy_constructor(self, other):
self._peak_search_level = other._peak_search_level
self._max_peaks = other._max_peaks
self._peak_cutoff = other._peak_cutoff
self._interpolate = other._interpolate
self._min_distance_sym_equiv = other._min_distance_sym_equiv
self._general_positions_only = other._general_positions_only
self._effective_resolution = other._effective_resolution
self._significant_height_fraction = other._significant_height_fraction
self._cluster_height_fraction = other._cluster_height_fraction
self._min_cross_distance = other._min_cross_distance
self._max_clusters = other._max_clusters
self._min_cubicle_edge = other._min_cubicle_edge
def peak_search_level(self):
return self._peak_search_level
def max_peaks(self):
return self._max_peaks
def peak_cutoff(self):
return self._peak_cutoff
def interpolate(self):
return self._interpolate
def min_distance_sym_equiv(self):
return self._min_distance_sym_equiv
def general_positions_only(self):
return self._general_positions_only
def effective_resolution(self):
return self._effective_resolution
def significant_height_fraction(self):
return self._significant_height_fraction
def cluster_height_fraction(self):
return self._cluster_height_fraction
def min_cross_distance(self):
return self._min_cross_distance
def max_clusters(self):
return self._max_clusters
def min_cubicle_edge(self):
return self._min_cubicle_edge
class cluster_site_info(object):
def __init__(self, peak_list_index, grid_index, grid_height, site, height):
self.peak_list_index = peak_list_index
self.grid_index = grid_index
self.grid_height = grid_height
self.site = site
self.height = height
class peak_cluster_analysis(object):
def __init__(self, peak_list,
special_position_settings,
general_positions_only = False,
effective_resolution = None,
significant_height_fraction = None,
cluster_height_fraction = None,
min_cross_distance = None,
max_clusters = None,
min_cubicle_edge = 5):
if (effective_resolution is not None):
if (significant_height_fraction is None):
significant_height_fraction = 1/5
if (cluster_height_fraction is None):
cluster_height_fraction = 1/3
if (min_cross_distance is None):
min_cross_distance = special_position_settings.min_distance_sym_equiv()
adopt_init_args(self, locals(), hide = True)
assert self._min_cross_distance is not None
self._gridding = peak_list.gridding()
if (effective_resolution is not None):
self._is_processed = flex.bool(peak_list.size(), False)
else:
self._is_processed = None
if ( effective_resolution is not None
or debug_peak_cluster_analysis == "use_old"):
self._site_cluster_analysis = None
else:
self._site_cluster_analysis = \
self._special_position_settings.site_cluster_analysis(
min_cross_distance = self._min_cross_distance,
min_self_distance
= self._special_position_settings.min_distance_sym_equiv(),
general_positions_only = self._general_positions_only,
min_cubicle_edge = self._min_cubicle_edge)
self._peak_list_indices = flex.size_t()
self._peak_list_index = 0
self._sites = flex.vec3_double()
self._heights = flex.double()
self._fixed_site_indices = flex.size_t()
def __next__(self):
if (self._effective_resolution is not None):
return self.next_with_effective_resolution()
else:
return self.next_site_cluster_analysis()
next = __next__
def all(self, max_clusters = None):
if (self._effective_resolution is not None):
return self.all_with_effective_resolution(max_clusters = max_clusters)
else:
return self.all_site_cluster_analysis(max_clusters = max_clusters)
def __iter__(self):
while 1:
site_info = next(self)
if site_info is None: break
yield site_info
def peak_list(self):
return self._peak_list
def special_position_settings(self):
return self._special_position_settings
def general_positions_only(self):
return self._general_positions_only
def effective_resolution(self):
return self._effective_resolution
def significant_height_fraction(self):
return self._significant_height_fraction
def cluster_height_fraction(self):
return self._cluster_height_fraction
def min_cross_distance(self):
return self._min_cross_distance
def max_clusters(self):
return self._max_clusters
def site_cluster_analysis(self):
return self._site_cluster_analysis
def peak_list_indices(self):
return self._peak_list_indices
def fixed_site_indices(self):
return self._fixed_site_indices
def sites(self):
return self._sites
def heights(self):
return self._heights
def max_grid_height(self):
if (self._peak_list.size() == 0):
return None
return self._peak_list.heights()[0]
def append_fixed_site(self, site, height = 0):
if (self._site_cluster_analysis is not None):
self._site_cluster_analysis.insert_fixed_site_frac(original_site = site)
self._fixed_site_indices.append(self._sites.size())
self._sites.append(site)
self._heights.append(height)
self._peak_list_indices.append(self._peak_list.size())
def discard_last(self):
assert self._peak_list_indices.size() > 0
if (self._site_cluster_analysis is not None):
self._site_cluster_analysis.discard_last()
self._peak_list_indices.pop_back()
self._sites.pop_back()
self._heights.pop_back()
def next_site_cluster_analysis(self):
while 1:
peak_list_index = self._peak_list_index
if (peak_list_index >= self._peak_list.size()): return None
self._peak_list_index += 1
site_symmetry = self._special_position_settings.site_symmetry(
site = self._peak_list.sites()[peak_list_index])
site = site_symmetry.exact_site()
if (not self._site_cluster_analysis.process_site_frac(
original_site = site,
site_symmetry_ops = site_symmetry)): continue
height = self._peak_list.heights()[peak_list_index]
self._peak_list_indices.append(peak_list_index)
self._sites.append(site)
self._heights.append(height)
return cluster_site_info(
peak_list_index = peak_list_index,
grid_index = self._peak_list.grid_indices(peak_list_index),
grid_height = self._peak_list.grid_heights()[peak_list_index],
site = site,
height = height)
def all_site_cluster_analysis(self, max_clusters = None):
if (max_clusters is None):
max_clusters = self._max_clusters
assert max_clusters is not None
while 1:
if (self._sites.size() >= max_clusters): break
if (self.next_site_cluster_analysis() is None): break
return self
def next_with_effective_resolution(self):
while 1:
peak_list_index = self._peak_list_index
if (peak_list_index >= self._peak_list.size()): return None
self._peak_list_index += 1
if (self._is_processed is not None):
if (self._is_processed[peak_list_index]): continue
self._is_processed[peak_list_index] = True
grid_index = self._peak_list.grid_indices(peak_list_index)
grid_height = self._peak_list.grid_heights()[peak_list_index]
site = self._peak_list.sites()[peak_list_index]
height = self._peak_list.heights()[peak_list_index]
site_symmetry = self._special_position_settings.site_symmetry(site)
if ( self._general_positions_only
and not site_symmetry.is_point_group_1()):
continue
site = site_symmetry.exact_site()
equiv_sites = sgtbx.sym_equiv_sites(site_symmetry)
keep = True
if (self._sites.size() > 250):
import warnings
warnings.warn(
message = "This function should not be used for"
" processing a large number of peaks.",
category = RuntimeWarning)
for s in self._sites:
dist = sgtbx.min_sym_equiv_distance_info(equiv_sites, s).dist()
if (dist < self._min_cross_distance):
keep = False
break
if (keep == True):
if ( self._effective_resolution is not None
and ( self._heights.size() == 0
or height < self._heights[0]
* self._significant_height_fraction)):
site, height = self._accumulate_significant(
site, height, site_symmetry, equiv_sites)
self._peak_list_indices.append(peak_list_index)
self._sites.append(site)
self._heights.append(height)
return cluster_site_info(
peak_list_index = peak_list_index,
grid_index = grid_index,
grid_height = grid_height,
site = site,
height = height)
def _accumulate_significant(self, site, height, site_symmetry, equiv_sites):
unit_cell = self.special_position_settings().unit_cell()
orth = unit_cell.orthogonalize
frac = unit_cell.fractionalize
sum_w_sites = matrix.col(orth(site)) * height
sum_w = height
height_cutoff = height * self._cluster_height_fraction
for i in range(self._peak_list_index, self._peak_list.size()):
if (self._is_processed[i]): continue
other_height = self._peak_list.heights()[i]
if (other_height < height_cutoff): break
other_site = self._peak_list.sites()[i]
other_site_symmetry = self._special_position_settings.site_symmetry(
other_site)
if ( self._general_positions_only
and not other_site_symmetry.is_point_group_1()):
self._is_processed[i] = True
continue
other_site = other_site_symmetry.exact_site()
dist_info = sgtbx.min_sym_equiv_distance_info(equiv_sites, other_site)
dist = dist_info.dist()
if (dist < self._min_cross_distance):
self._is_processed[i] = True
close_site = dist_info.apply(flex.vec3_double([other_site]))[0]
close_site = site_symmetry.special_op() * close_site
sum_w_sites += matrix.col(orth(close_site)) * other_height
sum_w += other_height
return frac(sum_w_sites / sum_w), height
def all_with_effective_resolution(self, max_clusters = None):
if (max_clusters is None):
max_clusters = self._max_clusters
assert max_clusters is not None
while 1:
if (self._sites.size() >= max_clusters): break
if (self.next_with_effective_resolution() is None): break
return self
def region_density_correlation(
large_unit_cell,
large_d_min,
large_density_map,
sites_cart,
site_radii,
work_scatterers):
sites_frac_large = large_unit_cell.fractionalize(sites_cart)
large_frac_min = sites_frac_large.min()
large_frac_max = sites_frac_large.max()
large_n_real = large_density_map.focus()
from scitbx import fftpack
from libtbx.math_utils import ifloor, iceil
large_ucp = large_unit_cell.parameters()
small_n_real = [0, 0, 0]
small_origin_in_large_grid = [0, 0, 0]
small_abc = [0, 0, 0]
sites_frac_shift = [0, 0, 0]
for i in range(3):
grid_step = large_ucp[i] / large_n_real[i]
buffer = large_d_min / grid_step
grid_min = ifloor(large_frac_min[i] * large_n_real[i] - buffer)
grid_max = iceil(large_frac_max[i] * large_n_real[i] + buffer)
min_grid = grid_max - grid_min + 1
small_n_real[i] = fftpack.adjust_gridding(min_grid = min_grid, max_prime = 5)
if (small_n_real[i] < large_n_real[i]):
shift_min = (small_n_real[i] - min_grid) // 2
small_origin_in_large_grid[i] = grid_min - shift_min
small_abc[i] = small_n_real[i] * grid_step
sites_frac_shift[i] = small_origin_in_large_grid[i] / large_n_real[i]
else:
small_n_real[i] = large_n_real[i]
small_origin_in_large_grid[i] = 0
small_abc[i] = large_ucp[i]
sites_frac_shift[i] = 0
sites_cart_shift = large_unit_cell.orthogonalize(sites_frac_shift)
sites_cart_small = sites_cart - sites_cart_shift
from cctbx import xray
small_xray_structure = xray.structure(
crystal_symmetry = crystal.symmetry(
unit_cell = tuple(small_abc)+large_ucp[3:],
space_group_symbol = "P1"),
scatterers = work_scatterers)
small_xray_structure.set_sites_cart(sites_cart = sites_cart_small)
small_f_calc = small_xray_structure.structure_factors(
d_min = large_d_min).f_calc()
small_gridding = crystal_gridding(
unit_cell = small_f_calc.unit_cell(),
space_group_info = small_f_calc.space_group_info(),
pre_determined_n_real = small_n_real)
from cctbx import miller
small_fft_map = miller.fft_map(
crystal_gridding = small_gridding,
fourier_coefficients = small_f_calc)
small_fft_map.apply_sigma_scaling()
small_map = small_fft_map.real_map_unpadded()
grid_indices = grid_indices_around_sites(
unit_cell = small_xray_structure.unit_cell(),
fft_n_real = small_n_real,
fft_m_real = small_n_real,
sites_cart = sites_cart_small,
site_radii = site_radii)
small_copy_from_large_map = copy(
map_unit_cell = large_density_map,
first = small_origin_in_large_grid,
last = matrix.col(small_origin_in_large_grid)
+ matrix.col(small_n_real)
- matrix.col((1, 1, 1)))
assert small_copy_from_large_map.all() == small_map.all()
corr = flex.linear_correlation(
x = small_map.select(grid_indices),
y = small_copy_from_large_map.select(grid_indices))
if (not corr.is_well_defined()):
return None
return corr.coefficient()
def ccv(map_1, map_2, modified, centered, cutoff = None, n_bins = 10000):
if(modified):
map_1 = volume_scale(map = map_1, n_bins = n_bins).map_data()
map_2 = volume_scale(map = map_2, n_bins = n_bins).map_data()
if(cutoff is not None):
map_1 = map_1 - cutoff
map_2 = map_2 - cutoff
s1 = map_1 < 0
s2 = map_2 < 0
map_1 = map_1.set_selected(s1, 0)
map_2 = map_2.set_selected(s2, 0)
def corr(x, y, centered):
s1 = x > 0
s2 = y > 0
s = s1 | s2
s = s.iselection()
x_ = x.select(s)
y_ = y.select(s)
return flex.linear_correlation(x = x_, y = y_,
subtract_mean = centered).coefficient()
return corr(x = map_1, y = map_2, centered = centered)
else:
return flex.linear_correlation(x = map_1.as_1d(), y = map_2.as_1d(),
subtract_mean = centered).coefficient()
class spherical_variance_around_point(object):
def __init__(self,
real_map,
unit_cell,
site_cart,
radius,
n_points = 40,
spline_interpolation = True,
write_sphere_points_to_pdb_file = None):
self.site_cart = site_cart
self.radius = radius
assert n_points>0
sphere_points = []
x, y, z = site_cart
# reference: "Distributing many points on a sphere" by E.B. Saff and
# A.B.J. Kuijlaars, Mathematical Intelligencer 19.1 (1997) 5--11.
# derived from http://packinon.sourceforge.net/py_progs/pg_saff.html
for k in range(1, n_points+1):
h = -1 + 2 * (k - 1) / float(n_points - 1)
theta = math.acos(h)
if (k == 1) or (k == n_points):
phi = 0
else:
phi = (old_phi + 3.6/math.sqrt(n_points*(1-h*h))) % (2*math.pi)
sphere_points.append((
x + math.sin(phi)*math.sin(theta),
y + math.cos(theta),
z + math.cos(phi)*math.sin(theta) ))
old_phi = phi
map_values = flex.double()
for point in sphere_points :
site_frac = unit_cell.fractionalize(site_cart = point)
value_at_point = real_map.tricubic_interpolation(site_frac)
map_values.append(value_at_point)
self.min = flex.min(map_values)
self.max = flex.max(map_values)
self.mean = flex.mean(map_values)
self.standard_deviation = map_values.standard_deviation_of_the_sample()
if (write_sphere_points_to_pdb_file is not None):
f = open(write_sphere_points_to_pdb_file, "w")
for i, point in enumerate(sphere_points):
f.write(
"HETATM 1 O HOH A 1 %7.3f %7.3f %7.3f 1.00 20.00\n"%
point)
f.close()
def show(self, out = None, prefix = ""):
if (out is None) : out = sys.stdout
print("%sMap values around point [%g, %g, %g], radius = %g:" % \
(prefix, self.site_cart[0], self.site_cart[1], self.site_cart[2],
self.radius), file = out)
print("%s min = %.2f max = %.2f mean = %.2f stddev = %.2f" % \
(prefix, self.min, self.max, self.mean, self.standard_deviation), file = out)
def principal_axes_of_inertia(
real_map,
site_cart,
unit_cell,
radius):
st = sphericity_tensor(
map_data = real_map,
unit_cell = unit_cell,
radius = radius,
site_frac = unit_cell.fractionalize(site_cart))
es = adptbx.eigensystem(st)
def center_of_mass_():
return center_of_mass(
map_data = real_map, unit_cell = unit_cell, cutoff = 0.1)
def inertia_tensor():
return st
def eigensystem():
return es
return group_args(
center_of_mass = center_of_mass_,
inertia_tensor = inertia_tensor,
eigensystem = eigensystem)
class local_scale(object):
def __init__(
self,
crystal_gridding,
crystal_symmetry,
f_map = None,
map_data = None,
miller_array = None,
d_min = None): #XXX = 1: more features and noise
# process inputs
assert [f_map, map_data].count(None) == 1
if(f_map is not None):
import cctbx.miller
fft_map = cctbx.miller.fft_map(
crystal_gridding = crystal_gridding,
fourier_coefficients = f_map)
fft_map.apply_sigma_scaling()
map_data = fft_map.real_map_unpadded()
#
self.map_result = None
self.map_coefficients = None
b = boxes(
n_real = crystal_gridding.n_real(),
fraction = 0.03)
# Loop over boxes, fill map_result with one box at a time
self.map_result = flex.double(flex.grid(b.n_real))
for s, e in zip(b.starts, b.ends):
box = copy(map_data, s, e)
box.reshape(flex.grid(box.all()))
mi, ma, me = box.as_1d().min_max_mean().as_tuple()
if(mi < ma):
box = volume_scale(map = box, n_bins = 1000).map_data()
set_box(
map_data_from = box,
map_data_to = self.map_result,
start = s,
end = e)
sd = self.map_result.sample_standard_deviation()
self.map_result = self.map_result/sd
if(miller_array is not None):
complete_set = miller_array
if(d_min is not None):
d_min = miller_array.d_min()
complete_set = miller_array.complete_set(d_min = d_min)
self.map_coefficients = complete_set.structure_factors_from_map(
map = self.map_result,
use_scale = True,
anomalous_flag = False,
use_sg = False)
def sphericity_by_heuristics(
map_data,
unit_cell,
center_cart,
radius,
s_angle_sampling_step = 20,
t_angle_sampling_step = 20):
points_on_sphere_cart = flex.vec3_double()
for s in range(0, 360, s_angle_sampling_step):
for t in range(0, 360, t_angle_sampling_step):
xc, yc, zc = scitbx.math.point_on_sphere(r = radius, s_deg = s, t_deg = t,
center = center_cart)
points_on_sphere_cart.append([xc, yc, zc])
o = sphericity2(
map_data = map_data,
center_cart = center_cart,
points_on_sphere_cart = points_on_sphere_cart,
unit_cell = unit_cell)
return group_args(rho = o.rho_min_max_mean(), ccs = o.ccs_min_max_mean())
def map_peak_3d_as_2d(
map_data,
unit_cell,
center_cart,
radius,
step = 0.01,
s_angle_sampling_step = 10,
t_angle_sampling_step = 10):
rho_1d = flex.double()
dist = flex.double()
radius = int(radius*100)+1
step = int(step*100)
for r in range(0, radius, step):
r = r/100.
dist.append(r)
rho = flex.double()
for s in range(0, 360, s_angle_sampling_step):
for t in range(0, 360, t_angle_sampling_step):
xc, yc, zc = scitbx.math.point_on_sphere(r = r, s_deg = s, t_deg = t,
center = center_cart)
xf, yf, zf = unit_cell.fractionalize([xc, yc, zc])
rho.append(map_data.eight_point_interpolation([xf, yf, zf]))
#rho.append(map_data.tricubic_interpolation([xf, yf, zf]))
rho_1d.append(flex.mean(rho))
return dist, rho_1d
class positivity_constrained_density_modification(object):
def __init__(self, f, f_000, n_cycles = 100, resolution_factor = 0.25, d_min = None,
crystal_gridding = None, complete_set = None):
self.f = f
self.d_min = d_min
self.map = None
self.crystal_gridding = crystal_gridding
from cctbx import miller
if(self.d_min is None): self.d_min = self.f.d_min()
if(complete_set is None):
complete_set = self.f.complete_set(d_min = self.d_min)
if(self.crystal_gridding is None):
self.crystal_gridding = self.f.crystal_gridding(
d_min = d_min,
resolution_factor = resolution_factor,
grid_step = None,
symmetry_flags = None,
mandatory_factors = None,
max_prime = 5,
assert_shannon_sampling = True)
self.f_mod = self.f.deep_copy()
for i in range(n_cycles):
fft_map = miller.fft_map(
crystal_gridding = self.crystal_gridding,
fourier_coefficients = self.f_mod,
f_000 = f_000)
if(f_000 is not None): fft_map.apply_volume_scaling()
self.map = fft_map.real_map_unpadded()
convert_to_non_negative(self.map, 0)
self.f_mod = complete_set.structure_factors_from_map(
map = self.map,
use_scale = True,
anomalous_flag = False,
use_sg = False)
self.f_mod = self.f.complete_with(other = self.f_mod, scale = True,
replace_phases = True)
#self.assert_equal()
def assert_equal(self):
from libtbx.test_utils import approx_equal
x, y = self.f, self.f_mod
x, y = x.common_sets(y)
x = abs(x).data()
y = abs(y).data()
assert approx_equal(x, y)
def d_min_corner(map_data, unit_cell):
max_index = flex.miller_index( [[(i-1)//2 for i in map_data.all()]] )
return uctbx.d_star_sq_as_d(unit_cell.max_d_star_sq( max_index ))
def d_min_from_map(map_data, unit_cell, resolution_factor = 1./2.):
a, b, c = unit_cell.parameters()[:3]
nx, ny, nz = map_data.all()
d1, d2, d3 = \
a/nx/resolution_factor, \
b/ny/resolution_factor, \
c/nz/resolution_factor
return max(d1, d2, d3)
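# Worked example (illustrative only, not part of the original module): for a
# hypothetical 25 A cubic P1 cell sampled on a 50 x 50 x 50 grid, the grid step
# is 0.5 A, so with the default resolution_factor of 1/2 the formula above
# gives 25/50/0.5 = 1.0 A in each direction.
def _example_d_min_from_map():
  uc = uctbx.unit_cell((25, 25, 25, 90, 90, 90))
  m = flex.double(flex.grid((50, 50, 50)), 0)
  return d_min_from_map(map_data = m, unit_cell = uc)  # expected: 1.0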
def map_coefficients_to_map(map_coeffs, crystal_symmetry, n_real):
assert isinstance(map_coeffs.data(), flex.complex_double)
cg = crystal_gridding(
unit_cell = crystal_symmetry.unit_cell(),
space_group_info = crystal_symmetry.space_group_info(),
pre_determined_n_real = n_real)
fft_map = map_coeffs.fft_map(
crystal_gridding = cg,
symmetry_flags = use_space_group_symmetry)
fft_map.apply_volume_scaling()
return fft_map.real_map_unpadded()
def map_to_map_coefficients(m, cs, d_min):
import cctbx.miller
fft = fftpack.real_to_complex_3d([i for i in m.all()])
map_box = copy(
m, flex.grid(fft.m_real()).set_focus(m.focus()))
map_box.reshape(flex.grid(fft.m_real()).set_focus(fft.n_real()))
map_box = fft.forward(map_box)
box_structure_factors = structure_factors.from_map(
unit_cell = cs.unit_cell(),
space_group_type = cs.space_group().type(),
anomalous_flag = False,
d_min = d_min,
complex_map = map_box,
conjugate_flag = True,
discard_indices_affected_by_aliasing = True)
n = map_box.all()[0] * map_box.all()[1] * map_box.all()[2]
map_coeffs = cctbx.miller.set(
crystal_symmetry = cs,
anomalous_flag = False,
indices = box_structure_factors.miller_indices(),
).array(data = box_structure_factors.data()/n)
return map_coeffs
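# Illustrative round-trip sketch (not part of the original module): the two
# helpers above convert a map to Fourier coefficients and back on the same
# grid; the agreement is limited only by the resolution cutoff.  The cell,
# grid and d_min used here are arbitrary example values.
def _example_map_coefficients_round_trip():
  cs = crystal.symmetry((20, 20, 20, 90, 90, 90), "P 1")
  m = flex.random_double(30 * 30 * 30)
  m.reshape(flex.grid((30, 30, 30)))
  ma = map_to_map_coefficients(m = m, cs = cs, d_min = 2.0)
  m2 = map_coefficients_to_map(map_coeffs = ma, crystal_symmetry = cs,
    n_real = m.all())
  return flex.linear_correlation(m.as_1d(), m2.as_1d()).coefficient()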
def atom_radius_as_central_peak_width(element, b_iso, d_min, scattering_table):
"""
Estimate atom radius as half-width of the central peak of Fourier image.
"""
from cctbx import xray, miller
dim = 40.
cs = crystal.symmetry((dim, dim, dim, 90, 90, 90), "P 1")
sp = crystal.special_position_settings(cs)
sc = xray.scatterer(
scattering_type = element,
site = (0, 0, 0),
u = adptbx.b_as_u(b_iso))
scatterers = flex.xray_scatterer([sc])
xrs = xray.structure(sp, scatterers)
xrs.scattering_type_registry(table = scattering_table)
cg = crystal_gridding(
unit_cell = xrs.unit_cell(),
space_group_info = xrs.space_group_info(),
step = 0.1)
fc = xrs.structure_factors(d_min = d_min, algorithm = "direct").f_calc()
fft_map = miller.fft_map(
crystal_gridding = cg,
fourier_coefficients = fc,
f_000 = xrs.f_000())
fft_map.apply_volume_scaling()
map_data = fft_map.real_map_unpadded()
def search_curve(map_data, dim):
x = 0.
step = 0.01
mv_max = None
mv_prev = None
while x<= dim:
mv = map_data.eight_point_interpolation([x/dim, 0, 0])
if(mv_prev is not None and mv>mv_prev): return x-step
if(mv_max is None): mv_max = mv
if(mv_max/mv>100.): return x-step
if(mv<0.): return x-step
x+= step
mv_prev = mv
return None
radius = search_curve(map_data = map_data, dim = dim)
assert radius is not None
return radius
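# Illustrative usage sketch (not part of the original module): estimate the
# radius of the Fourier image of a carbon atom at 2 A resolution with B = 30
# using the electron scattering table.  The parameter values are arbitrary
# examples.
def _example_atom_radius():
  return atom_radius_as_central_peak_width(
    element          = "C",
    b_iso            = 30,
    d_min            = 2.0,
    scattering_table = "electron")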
class atom_curves(object):
"""
Class-toolkit to compute various 1-atom 1D curves: exact electron density,
Fourier image of specified resolution, etc.
"""
def __init__(self, scattering_type, scattering_table = "wk1995",
scattering_dictionary=None):
adopt_init_args(self, locals())
assert [self.scattering_table, self.scattering_dictionary].count(None)==1
self.scr = self.get_xray_structure(box = 1, b = 0).scattering_type_registry()
self.uff = self.scr.unique_form_factors_at_d_star_sq
def get_xray_structure(self, box, b):
cs = crystal.symmetry((box, box, box, 90, 90, 90), "P 1")
sp = crystal.special_position_settings(cs)
from cctbx import xray
sc = xray.scatterer(
scattering_type = self.scattering_type,
site = (0, 0, 0),
u = adptbx.b_as_u(b))
scatterers = flex.xray_scatterer([sc])
xrs = xray.structure(sp, scatterers)
if(self.scattering_table is not None):
xrs.scattering_type_registry(table = self.scattering_table)
else:
xrs.scattering_type_registry(custom_dict = self.scattering_dictionary)
return xrs
def exact_density_at_r(self, r, b_iso):
return self.scr.gaussian(self.scattering_type).electron_density(r, b_iso)
def exact_gradient_at_r(self, r, t, t0, b_iso):
return self.scr.gaussian(self.scattering_type).gradient(r = r, t = t, t0 = t0,
b_iso = b_iso)
def exact_density(self, b_iso, radius_max = 5., radius_step = 0.001):
r = 0.0
density = flex.double()
radii = flex.double()
ed = self.scr.gaussian(self.scattering_type)
while r < radius_max:
density.append(ed.electron_density(r, b_iso))
radii.append(r)
r+= radius_step
return group_args(radii = radii, density = density)
def form_factor(self, ss, b_iso):
dss = 4*ss
return self.uff(dss)[0]*math.exp(-b_iso*ss)
def integrand(self, r, b_iso):
def compute(s):
ss = (s/2)**2
if(abs(r)>1.e-9):
return 2/r * s * self.form_factor(ss, b_iso) * math.sin(2*math.pi*r*s)
else:
return 4*math.pi * s**2 * self.form_factor(ss, b_iso)
return compute
def image(self,
d_min,
b_iso,
d_max = None,
radius_min = 0,
radius_max = 5.,
radius_step = 0.001,
n_integration_steps = 2000):
r = radius_min
assert d_max != 0.
if(d_max is None): s_min = 0
else: s_min = 1./d_max
assert d_min != 0.
s_max = 1./d_min
image_values = flex.double()
radii = flex.double()
while r < radius_max:
s = scitbx.math.simpson(
f = self.integrand(r, b_iso), a = s_min, b = s_max, n = n_integration_steps)
image_values.append(s)
radii.append(r)
r+= radius_step
    # Find first inflection point
first_inflection_point = None
i_first_inflection_point = None
size = image_values.size()
second_derivatives = flex.double()
for i in range(size):
if(i>0 and i<size-1):
dxx = image_values[i-1]+image_values[i+1]-2*image_values[i]
elif(i == 0):
dxx = 2*image_values[i+1]-2*image_values[i]
else:
dxx = second_derivatives[i-1]*radius_step**2
if(first_inflection_point is None and dxx>0):
first_inflection_point = (radii[i-1]+radii[i])/2.
i_first_inflection_point = i
second_derivatives.append(dxx/radius_step**2)
return group_args(
radii = radii,
image_values = image_values,
first_inflection_point = first_inflection_point,
i_first_inflection_point = i_first_inflection_point,
radius = first_inflection_point*2,
second_derivatives = second_derivatives)
def image_from_miller_indices(self, miller_indices, b_iso, uc,
radius_max, radius_step):
p2 = flex.double()
tmp = flex.double()
for mi in miller_indices:
p2.append(self.form_factor(ss = uc.d_star_sq(mi)/4, b_iso = b_iso))
tmp.append( 2*math.pi*mi[2] )
mv = flex.double()
rad = flex.double()
z = 0.0
while z < radius_max:
result = 0
for mi, p2i, tmpi in zip(miller_indices, p2, tmp):
result += p2i*math.cos(tmpi*z)
rad.append(z)
mv.append(result*2)
z+= radius_step
return group_args(radii = rad, image_values = mv/uc.volume())
def image_from_3d(self, box, b, step, unit_cell, space_group_info,
miller_array):
from cctbx import miller
xrs = self.get_xray_structure(box = box, b = b)
fc = miller_array.structure_factors_from_scatterers(
xray_structure = xrs, algorithm = "direct").f_calc()
cg = crystal_gridding(
unit_cell = unit_cell,
space_group_info = space_group_info,
step = step,
symmetry_flags = use_space_group_symmetry)
fft_map = miller.fft_map(
crystal_gridding = cg,
fourier_coefficients = fc)
fft_map.apply_volume_scaling()
map_data = fft_map.real_map_unpadded()
mv = flex.double()
radii = flex.double()
r = 0
while r < box:
mv_ = map_data.eight_point_interpolation([r/box, 0, 0])
mv.append(mv_)
radii.append(r)
r+= step
return group_args(radii = radii, image_values = mv)
def one_gaussian_exact(self, r, A0, B0, b = 0):
cmn = 4*math.pi/(B0+b)
return A0*cmn**1.5 * math.exp(-math.pi*cmn*r**2)
def one_gaussian_approximation(self, d_min, b, use_inflection_point = True):
ib0 = self.image(
d_min = d_min, b_iso = 0, radius_max = 5, radius_step = 0.01)
if(use_inflection_point):
i_cut = ib0.i_first_inflection_point
else:
i_cut = None
for i in range(ib0.radii.size()):
if(ib0.image_values[i]<= 0):
rad_cut = ib0.radii[i-1]
i_cut = i-1
break
assert i_cut is not None
# this gives a*exp(-b*x**2)
r = scitbx.math.gaussian_fit_1d_analytical(
x = ib0.radii[:i_cut], y = ib0.image_values[:i_cut])
B0 = 4*math.pi**2/r.b
A0 = r.a/(r.b/math.pi)**1.5
image_approx_values = flex.double()
for rad in ib0.radii:
v = self.one_gaussian_exact(r = rad, A0 = A0, B0 = B0, b = b)
image_approx_values.append(v)
return group_args(image_b0 = ib0, image_approx_at_b = image_approx_values,
i_cut = i_cut, n_points = ib0.radii.size())
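# Illustrative usage sketch (not part of the original module): compute the
# Fourier image of a single carbon atom at 3 A resolution with B = 50 and take
# the suggested atom radius (twice the first inflection point), as described
# in the class doc-string above.  The parameter values are arbitrary examples.
def _example_atom_curves():
  o = atom_curves(scattering_type = "C", scattering_table = "wk1995")
  im = o.image(d_min = 3.0, b_iso = 50)
  return im.radius, im.first_inflection_point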
def sharpen2(map, xray_structure, resolution, file_name_prefix):
from cctbx import miller
fo = miller.structure_factor_box_from_map(
crystal_symmetry = xray_structure.crystal_symmetry(), map = map)
#
fc = fo.structure_factors_from_scatterers(
xray_structure = xray_structure).f_calc()
d_fsc_model = fc.d_min_from_fsc(
other = fo, bin_width = 100, fsc_cutoff = 0.).d_min
print("d_fsc_model:", d_fsc_model)
#resolution = min(resolution, d_fsc_model)
#resolution = d_fsc_model
print(resolution, d_fsc_model)
#
xray_structure = xray_structure.set_b_iso(value = 0)
fc = fo.structure_factors_from_scatterers(
xray_structure = xray_structure).f_calc()
d_spacings = fo.d_spacings().data()
#
cc = -999
d_best = None
data = fc.data().deep_copy()
for d in [i/10. for i in range(10, 100)]:
sel = d_spacings<d
data_ = data.set_selected(sel, 0)
fc_ = fc.customized_copy(data = data_)
cc_ = fo.map_correlation(other = fc_)
if(cc_>cc):
cc = cc_
d_best = d
#print "%8.1f %10.6f"%(d, cc_)
print("Best d:", d_best)
#
fc1 = xray_structure.structure_factors(d_min = resolution).f_calc()
fc2 = fc1.resolution_filter(d_min = d_best)
cg = crystal_gridding(
unit_cell = xray_structure.crystal_symmetry().unit_cell(),
space_group_info = xray_structure.crystal_symmetry().space_group_info(),
d_min = resolution)
map2 = fc2.fft_map(crystal_gridding = cg).real_map_unpadded()
cc = -999
b = None
ss = 1./flex.pow2(fc1.d_spacings().data()) / 4.
data = fc1.data()
for b_ in range(1, 500, 1):
xray_structure = xray_structure.set_b_iso(value = b_)
sc = flex.exp(-b_*ss)
fc1 = fc1.customized_copy(data = data*sc)
map1 = fc1.fft_map(crystal_gridding = cg).real_map_unpadded()
cc_ = flex.linear_correlation(x = map1.as_1d(), y = map2.as_1d()).coefficient()
if(cc_>cc):
cc = cc_
b = b_
#print "%8.0f %10.6f"%(b_, cc_)
print("Best B:", b)
#
fo_sharp = fo.resolution_filter(d_min = resolution)
ss = 1./flex.pow2(fo_sharp.d_spacings().data()) / 4.
#B_sharp = -35.
B_sharp = -1*b
sc = flex.exp(-B_sharp*ss)
fo_sharp = fo_sharp.customized_copy(data = fo_sharp.data()*sc)
# output
mtz_dataset = fo_sharp.as_mtz_dataset(column_root_label = "F")
mtz_object = mtz_dataset.mtz_object()
mtz_object.write(file_name = "%s.mtz"%file_name_prefix)
fft_map = fo_sharp.fft_map(crystal_gridding = cg)
fft_map.apply_sigma_scaling()
map_data = fft_map.real_map_unpadded()
#
import mmtbx.masks
mask_object = mmtbx.masks.smooth_mask(
xray_structure = xray_structure,
n_real = map_data.all(),
rad_smooth = 2.0)
map_data = map_data * mask_object.mask_smooth
#
from iotbx import mrcfile
mrcfile.write_ccp4_map(
file_name = "%s.ccp4"%file_name_prefix,
unit_cell = cg.unit_cell(),
space_group = cg.space_group(),
#gridding_first = (0, 0, 0), # This causes a bug (map gets shifted)
#gridding_last = n_real, # This causes a bug (map gets shifted)
map_data = map_data,
labels = flex.std_string([""]))
return fo_sharp, map_data
def loc_res(map,
model, #pdb_hierarchy,
crystal_symmetry,
chunk_size = 10,
soft_mask_radius = 3.,
method = "fsc",
hard_d_min = 1.5,
b_range_low = -200,
b_range_high = 500,
fsc_cutoff = 0.143,
wrapping = False,
verbose = False,
log = sys.stdout):
assert method in ["fsc", "rscc", "rscc_d_min_b"]
from cctbx import maptbx
from cctbx import miller
import mmtbx.utils
from iotbx.map_model_manager import map_model_manager
mmm = map.as_1d().min_max_mean().as_tuple()
map = map-mmm[2]
map = map/map.sample_standard_deviation()
cg = maptbx.crystal_gridding(
unit_cell = crystal_symmetry.unit_cell(),
space_group_info = crystal_symmetry.space_group_info(),
pre_determined_n_real = map.accessor().all())
#
pdb_hierarchy = model.get_hierarchy()
ph_dc = pdb_hierarchy.deep_copy()
xrs = pdb_hierarchy.extract_xray_structure(crystal_symmetry = crystal_symmetry)
mmtbx.utils.setup_scattering_dictionaries(
scattering_table = "electron",
xray_structure = xrs,
d_min = 1.0)
#
bs = pdb_hierarchy.atoms().extract_b()
if method == "rscc_d_min_b":
occs = pdb_hierarchy.atoms().extract_occ()
else:
occs = None
results_b = flex.double()
results = flex.double()
chunk_selections = pdb_hierarchy.chunk_selections(
residues_per_chunk = chunk_size)
#
from iotbx.map_manager import map_manager
mm = map_manager(map_data = map,
unit_cell_crystal_symmetry = crystal_symmetry,
unit_cell_grid = map.all(),
wrapping = wrapping)
for chunk_sel in chunk_selections:
ph_sel = pdb_hierarchy.select(chunk_sel).deep_copy()
xrs_sel = xrs.select(chunk_sel)
model_sel = model.select(chunk_sel)
mmm = map_model_manager(
model = model_sel,
map_manager = mm)
mmm.box_all_maps_around_model_and_shift_origin(
box_cushion = 3,
)
mmm.map_manager().create_mask_around_atoms(model = mmm.model(),
mask_atoms_atom_radius = 3.)
mmm.map_manager().soft_mask(soft_mask_radius = soft_mask_radius)
mmm.map_manager().apply_mask()
#####
fo = miller.structure_factor_box_from_map(
crystal_symmetry = mmm.model().get_xray_structure().crystal_symmetry(),
map = mmm.map_manager().map_data())
if method == "rscc_d_min_b":
fo = fo.resolution_filter(d_min = hard_d_min)
mmm.model().get_xray_structure().set_b_iso(value = 0.0)
fc = fo.structure_factors_from_scatterers(
xray_structure = mmm.model().get_xray_structure()).f_calc()
b_iso = 0
d_min = 0
cc = 0
if(method == "fsc"):
d_min = fc.d_min_from_fsc(other = fo, bin_width = 100,
fsc_cutoff = fsc_cutoff).d_min
elif(method == "rscc"):
d_spacings = fc.d_spacings().data()
ss = 1./flex.pow2(d_spacings) / 4.
d_min, cc = maptbx.cc_complex_complex(
f_1 = fo.data(),
f_2 = fc.data(),
d_spacings = d_spacings,
ss = ss,
d_mins = flex.double([i/10. for i in range(15, 100)]),
b_iso = 0)
elif(method == "rscc_d_min_b"):
d_spacings = fc.d_spacings().data()
ss = 1./flex.pow2(d_spacings) / 4.
d_min_best = None
cc_best = None
b_best = None
scale = 20
low_value = int(b_range_low/scale)
high_value = max(low_value+1, int(b_range_high/scale))
for b_iso in [ i*scale for i in range(low_value, high_value)]:
d_min, cc = maptbx.cc_complex_complex(
f_1 = fc.data(), # note swapped from rscc f_1 is fixed
f_2 = fo.data(),
d_spacings = d_spacings,
ss = ss,
d_mins = flex.double([i/10. for i in range(int(hard_d_min*10), 100)]),
b_iso = b_iso)
if cc_best is None or cc>cc_best:
cc_best = cc
d_min_best = d_min
b_best = b_iso
cc = cc_best
d_min = d_min_best
b_iso = b_best
if verbose:
print("CHUNK d_min %s b %s cc %s" %(d_min, b_iso, cc), file = log)
results.append(d_min)
results_b.append(b_iso)
ph_sel.adopt_xray_structure(mmm.model().get_xray_structure())
if (method == "rscc_d_min_b"):
bs = bs.set_selected(chunk_sel, b_iso) # b value in B
occs = occs.set_selected(chunk_sel, d_min) # d_min in occ
else:
bs = bs.set_selected(chunk_sel, d_min) # d_min in B value
print(flex.min(results), flex.max(results), flex.mean(results), file = log)
print(flex.min(bs), flex.max(bs), flex.mean(bs), file = log)
pdb_hierarchy.atoms().set_b(bs)
if (method == "rscc_d_min_b"):
pdb_hierarchy.atoms().set_occ(occs)
return pdb_hierarchy
def is_bounded_by_constant(map_data,
relative_sd_tol = 0.1):
''' Determine if this map is bounded on all sides by values that are
zero or a constant, within relative tolerance of relative_sd_tol to
the SD of the map as a whole
Returns True if map boundary values are nearly constant,
and False if they vary
Requires that map is at origin (0,0,0)
'''
assert tuple(map_data.origin()) == (0,0,0)
relative_sd = relative_sd_on_edges(map_data)
# Determine whether values at boundaries are all about the same:
if relative_sd > relative_sd_tol:
return False # Not uniform on edges
else:
return True # uniform on edges
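# Illustrative usage sketch (not part of the original module): a map that is
# zero everywhere except for a single interior point is bounded by a constant,
# so the check above is expected to return True for it.  The grid size is an
# arbitrary example.
def _example_is_bounded_by_constant():
  m = flex.double(flex.grid((20, 20, 20)), 0)
  m[(10, 10, 10)] = 1  # single non-zero value away from the edges
  return is_bounded_by_constant(map_data = m)  # expected: True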
def relative_sd_on_edges(map_data,
skip_if_greater_than = None,
use_maximum = None):
'''
Determine relative SD of values on edges to the map as a whole
Requires that map is at origin (0,0,0)
'''
assert tuple(map_data.origin()) == (0,0,0)
sd_overall = map_data.as_1d().standard_deviation_of_the_sample()
all = list(map_data.all())
boundary_data = flex.double()
relative_sd_on_edges = 0
from cctbx.maptbx import copy
for i in (0, all[0]-1):
new_map_data = copy(map_data,
tuple((i, 0, 0)),
tuple((i, all[1], all[2])))
boundary_data.extend(new_map_data.as_1d())
relative_sd_on_edges = max(relative_sd_on_edges,
new_map_data.as_1d().standard_deviation_of_the_sample() / max(
1.e-10,sd_overall))
if (skip_if_greater_than is not None) and (
relative_sd_on_edges > skip_if_greater_than):
return relative_sd_on_edges
for j in (0, all[1]-1):
new_map_data = copy(map_data,
tuple((0, j, 0)),
tuple((all[0], j, all[2])))
boundary_data.extend(new_map_data.as_1d())
relative_sd_on_edges = max(relative_sd_on_edges,
new_map_data.as_1d().standard_deviation_of_the_sample() / max(
1.e-10,sd_overall))
if (skip_if_greater_than is not None) and (
relative_sd_on_edges > skip_if_greater_than):
return relative_sd_on_edges
for k in (0, all[2]-1):
new_map_data = copy(map_data,
tuple((0, 0, k)),
tuple((all[0], all[1], k)))
boundary_data.extend(new_map_data.as_1d())
relative_sd_on_edges = max(relative_sd_on_edges,
new_map_data.as_1d().standard_deviation_of_the_sample() / max(
1.e-10,sd_overall))
if (skip_if_greater_than is not None) and (
relative_sd_on_edges > skip_if_greater_than):
return relative_sd_on_edges
if use_maximum: # Take maximum for any edge
return relative_sd_on_edges
else: # use overall
return boundary_data.standard_deviation_of_the_sample(
) / max(1.e-10,sd_overall)
def get_resolution_where_significant_data_present(ma,
minimum_fraction_data_points=0.1):
# Now filter ma at resolution where there are significant data
sel = ( ma.amplitudes().data() > 1.e-10)
ma_with_data = ma.select(sel)
n_bins = int(0.5+10 * 1/minimum_fraction_data_points)
ma_with_data.setup_binner(n_bins = n_bins, d_max = 10000.,
d_min = ma_with_data.d_min())
dsd = ma_with_data.d_spacings().data()
ibin_list=list(ma_with_data.binner().range_used())
ibin_list.reverse()
total_data = ma_with_data.size()
minimum_data_points = int(minimum_fraction_data_points * total_data)
total_found = 0
for i_bin in ibin_list:
sel2 = ma_with_data.binner().selection(i_bin)
dd = dsd.select(sel2)
d_max = dd.min_max_mean().max
n = dd.size()
total_found += n
if total_found >= minimum_data_points and total_found < total_data//2:
return d_max
return None
def get_diff_score_towards_periodic(map_data,
minimum_fraction_data_points = None):
'''
Evaluate consistency of high-pass filtered difference map analysis
with that expected for a map that is periodic.
  The difference map is the difference between the map and the same map lacking
  high-resolution terms. This difference map shows only high-frequency
  information.
A map that is periodic should give a difference map that is more or less
uniform everywhere. A non-periodic map should have a discontinuity at the
borders and have high variation in the difference map at the edges.
'''
from cctbx import crystal
dummy_uc_parameters=tuple(list(map_data.all())+[90.,90.,90.])
cs= crystal.symmetry( dummy_uc_parameters, 1)
# Normalize the map data
sd=max(1.e-20,map_data.as_1d().standard_deviation_of_the_sample())
mean=map_data.as_1d().min_max_mean().mean
map_data=(map_data - mean)/sd
# Test for difference map variation at edges of map
# Get all structure factors, back transform to get map that can
# be represented by FT of all map coefficients in box (may not
# be the same as original because gridding may not allow it)
from cctbx import miller
ma = miller.structure_factor_box_from_map(
crystal_symmetry = cs,
map = map_data,
d_min = None)
map_data = map_coefficients_to_map(
map_coeffs = ma,
crystal_symmetry = cs,
n_real = map_data.all())
# Now we have map that can be represented by Fourier coefficients.
# First get the map as Fourier coefficients
ma = miller.structure_factor_box_from_map(
crystal_symmetry = cs,
map = map_data,
d_min = None)
# Ready with map as Fourier coefficients (FT of ma will give map_data again)
# Find highest resolution where there are some non-zero data
d_min_value = get_resolution_where_significant_data_present(ma,
minimum_fraction_data_points = minimum_fraction_data_points)
# High-frequency filter at this resolution
filtered_ma = ma.resolution_filter(d_min = d_min_value)
filtered_map = map_coefficients_to_map(
map_coeffs = filtered_ma,
crystal_symmetry = cs,
n_real = map_data.all())
# Make a difference map to look at only high_frequency terms
diff_map=map_data - filtered_map
# Get the overall SD of the map and SD on edges:
diff_sd = diff_map.as_1d().standard_deviation_of_the_sample()
diff_relative_sd_on_edges = relative_sd_on_edges(diff_map,
use_maximum = True)
  # Score based on the expectation that a periodic map gives a value of about 1
  # and a non-periodic map a value of about 2
diff_score_towards_aperiodic = max(0,min(1,(
diff_relative_sd_on_edges - 1)/(2 - 1)))
diff_score_towards_periodic = 1 - diff_score_towards_aperiodic
return diff_score_towards_periodic
def get_edge_score_towards_periodic(map_data,
use_minimum = True):
'''
Measure of whether facing edges have correlated data with correlation
similar to that found for adjacent planes and different than randomly
chosen points
If use_minimum is set, take minimum of values on all pairs of faces
'''
all = list(map_data.all())
one_data = flex.double()
middle_plus_one_data = flex.double()
middle_data = flex.double()
boundary_zero_data = flex.double()
boundary_zero_one_data = flex.double()
boundary_one_data = flex.double()
lowest_relative_cc = 1.0
from cctbx.maptbx import copy
unique_list=[]
for i in (0,1, all[0]-1):
if not i in unique_list: unique_list.append(i)
new_map_data = copy(map_data,
tuple((i, 0, 0)),
tuple((i, all[1], all[2])))
if i == 0:
boundary_zero_data_local=new_map_data.as_1d()
boundary_zero_data.extend(new_map_data.as_1d())
elif i == 1:
one_data_local=new_map_data.as_1d()
one_data.extend(new_map_data.as_1d())
else:
boundary_one_data_local=new_map_data.as_1d()
boundary_one_data.extend(new_map_data.as_1d())
lowest_relative_cc = min(lowest_relative_cc,get_relative_cc(
boundary_zero_data=boundary_zero_data_local,
boundary_one_data=boundary_one_data_local,
one_data=one_data_local,))
assert len(unique_list) == 3
unique_list=[]
for j in (0,1, all[1]-1):
if not j in unique_list: unique_list.append(j)
new_map_data = copy(map_data,
tuple((0, j, 0)),
tuple((all[0], j, all[2])))
if j == 0:
boundary_zero_data_local=new_map_data.as_1d()
boundary_zero_data.extend(new_map_data.as_1d())
elif j == 1:
one_data_local=new_map_data.as_1d()
one_data.extend(new_map_data.as_1d())
else:
boundary_one_data_local=new_map_data.as_1d()
boundary_one_data.extend(new_map_data.as_1d())
assert len(unique_list) == 3
lowest_relative_cc = min(lowest_relative_cc,get_relative_cc(
boundary_zero_data=boundary_zero_data_local,
boundary_one_data=boundary_one_data_local,
one_data=one_data_local,))
unique_list=[]
for k in (0, 1, all[2]-1):
if not k in unique_list: unique_list.append(k)
new_map_data = copy(map_data,
tuple((0, 0, k)),
tuple((all[0], all[1], k)))
if k == 0:
boundary_zero_data_local=new_map_data.as_1d()
boundary_zero_data.extend(new_map_data.as_1d())
elif k == 1:
one_data_local=new_map_data.as_1d()
one_data.extend(new_map_data.as_1d())
else:
boundary_one_data_local=new_map_data.as_1d()
boundary_one_data.extend(new_map_data.as_1d())
assert len(unique_list) == 3
lowest_relative_cc = min(lowest_relative_cc,get_relative_cc(
boundary_zero_data=boundary_zero_data_local,
boundary_one_data=boundary_one_data_local,
one_data=one_data_local,))
# use lowest value of relative_cc for any pair of faces so
# we can detect any faces that are trimmed
if use_minimum:
relative_cc=lowest_relative_cc
else:
relative_cc = get_relative_cc(
boundary_zero_data=boundary_zero_data,
boundary_one_data=boundary_one_data,
one_data=one_data,)
edge_score_towards_periodic = max(0,min(1,relative_cc ))
return edge_score_towards_periodic
def get_relative_cc(
boundary_zero_data = None,
boundary_one_data = None,
one_data = None):
cc_boundary_zero_one= flex.linear_correlation(boundary_zero_data,
boundary_one_data).coefficient()
cc_positive_control= flex.linear_correlation(boundary_zero_data,
one_data).coefficient()
# Make negative control with randomized order of data
one_data_random_perm= one_data.select(
flex.random_permutation(len(one_data)))
cc_negative_control = flex.linear_correlation(boundary_zero_data,
one_data_random_perm).coefficient()
  # Expect the negative control to be near zero and the positive control to be
  # high (near 1).  A cc_boundary_zero_one similar to the negative control
  # means the planes at the boundaries differ, while one similar to the
  # positive control means the boundaries are similar (as in a wrapped map)
relative_cc = (cc_boundary_zero_one - cc_negative_control)/max(1.e-10,
cc_positive_control - cc_negative_control)
return relative_cc
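# Illustrative sketch (not part of the original module) of the idea behind
# get_relative_cc(): nearly identical planes give a relative cc close to 1,
# unrelated planes give a value close to 0.  The data here are random numbers
# generated for demonstration only.
def _example_get_relative_cc():
  plane     = flex.random_double(400)
  adjacent  = plane + flex.random_double(400) * 0.01  # almost the same plane
  unrelated = flex.random_double(400)
  near_one = get_relative_cc(
    boundary_zero_data = plane,
    boundary_one_data  = adjacent,
    one_data           = adjacent)
  near_zero = get_relative_cc(
    boundary_zero_data = plane,
    boundary_one_data  = unrelated,
    one_data           = adjacent)
  return near_one, near_zero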
def is_periodic(map_data,
minimum_fraction_data_points = 0.1,
high_confidence_delta = 0.2,
medium_confidence_delta = 0.25):
'''
Determine if this map is periodic. If values on opposite faces are
about as similar as values on adjacent planes, it is probably periodic.
Two tests are used: (1) correlation of facing edges of map and
(2) test whether difference map between original and map without
high resolution data shows most variation at edges (due to mismatch
of edge data at facing edges of map).
Map edge correlation score:
Normally values on adjacent planes are very highly correlated (> 0.9)
and random points in a map have very low correlation (< 0.1). This
allows a test based on correlation of facing edges of a map and comparison
to random pairs of points in map.
Difference map score:
If a map is boxed then if it is treated as a periodic map, there will
be a discontinuity at the edges of the map. This can be detected by
calculating the Fourier transform of the high-resolution map coefficients
for the map and detecting if this high-pass filtered map is dominated by
features at the edge of the map.
Returns True if periodic, False if not, and None if map gridding is
too small (too few planes) or sampling is insufficiently fine to tell.
Requires that map is at origin (0,0,0)
'''
assert tuple(map_data.origin()) == (0,0,0)
# The difference map score is solid if > 0.8 or < 0.2. Otherwise best to
# combine it with edge score (correlation of edges) to get sum score
diff_score_towards_periodic = get_diff_score_towards_periodic(map_data,
minimum_fraction_data_points = minimum_fraction_data_points)
if diff_score_towards_periodic > (1 - high_confidence_delta):
return True
elif diff_score_towards_periodic < high_confidence_delta:
return False
# Get edge score and sum score now
edge_score_towards_periodic = get_edge_score_towards_periodic(map_data)
sum_score_towards_periodic = 0.5* (
diff_score_towards_periodic + edge_score_towards_periodic)
# We can be confident if sum_score is < .25 or > 0.75
if sum_score_towards_periodic > (1 - medium_confidence_delta):
return True
elif sum_score_towards_periodic < medium_confidence_delta:
return False
else:
return None # Really do not know
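# Illustrative usage sketch (not part of the original module): is_periodic()
# only needs a real-space map whose origin is at (0, 0, 0).  A random map is
# used here purely for demonstration; it has no special discontinuity at its
# edges, so the call is expected to return True (or None if the scores come
# out ambiguous).
def _example_is_periodic():
  m = flex.random_double(24 * 24 * 24)
  m.reshape(flex.grid((24, 24, 24)))
  return is_periodic(m)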
| 35.719194
| 90
| 0.663956
|
1d528b6262453b10f11297f5ba568f681a2dcb54
| 6,637
|
py
|
Python
|
tureng_cli.py
|
iPatavatsizz/tureng_cli
|
5f2eaad7d76e711e6f4452e9614f9ef84308e634
|
[
"MIT"
] | 1
|
2021-06-07T13:19:32.000Z
|
2021-06-07T13:19:32.000Z
|
tureng_cli.py
|
iPatavatsizz/tureng_cli
|
5f2eaad7d76e711e6f4452e9614f9ef84308e634
|
[
"MIT"
] | null | null | null |
tureng_cli.py
|
iPatavatsizz/tureng_cli
|
5f2eaad7d76e711e6f4452e9614f9ef84308e634
|
[
"MIT"
] | null | null | null |
# Command Line 'www.tureng.com' Search Interface
# Version: 1.0.0
# By Hx0z
# Github: https://www.github.com/iPatavatsizz/tureng_cli
# Import
import sys
from collections.abc import Callable
import signal
import asyncio
import requests as rq
import bs4 as bs
# Errors
class _tureng(Exception):
def __init__(self, message):
self.message = message
super().__init__(self.message)
class error(_tureng):
def __init__(self, message):
self.message = message
super().__init__(self.message)
# Classes
class word:
"""Word object represents tureng words,
contains:
name -> the name of the word
meanings -> the meanings list of the word
    len() -> the function returning the length of the word's meanings list
"""
def __init__(self, name, meanings):
self.name = name
self.meanings = meanings
def __repr__(self):
return f"<tureng.word(name = '{self.name}', meanings = {self.meanings[:3]}"
def len(self):
return len(self.meanings)
class tureng():
"""Tureng engine for searching word fast.
contains:
command() -> The command decorator for creating cli commands
exmple:
engine = tureng()
@engine.command
def myCommand(args):
...
commands -> list of added commands
search(word) -> word search func searching the meanings of searched word
"""
def __init__(self):
# super().__init__()
self.commands = {}
self._history = []
# Set local commands
self.command(self.history)
self.command(self.help)
self.command(self.exit)
def __repr__(self):
c = []
for name in self.commands:
c.append(name)
return f"<tureng.base(commands = {c}, history = {self._history})>"
def __dir__(self):
return [
"command",
"commands",
"_history",
"search"
]
def command(self, cmd):
if not isinstance(cmd, Callable):
raise error(message="You can only use any method types to define a new command.")
if not asyncio.iscoroutinefunction(cmd):
raise error(message="All command types must be defined with 'async def func_name(self, args): ...'!")
else:
if hasattr(cmd, '__func__'):
name = cmd.__func__.__name__
else:
name = cmd.__name__
self.commands.update({name: cmd})
return True
async def parse(self, text):
command = text.split()
if command[0].startswith("!"):
command[0] = command[0][1:]
return command
async def is_command(self, text):
if len(text) <= 0:
print("Please type a word.")
return None
if text.startswith("!"):
c = await self.parse(text)
tname = c[0]
targs = c[1:]
for name, command in self.commands.items():
if tname == name:
if hasattr(command, '__func__'):
await command(targs)
else:
await command(self, targs)
return True
print(f"the '{tname}' is not a command.")
return None
return False
async def search(self, _word: str):
c = await self.is_command(_word)
if not c and c is not None:
connect = rq.get(f"https://tureng.com/tr/turkce-ingilizce/{_word}")
soup = bs.BeautifulSoup(connect.content, "html.parser")
meanings = soup.find_all('td', {"class": "tr ts"})
if len(meanings) == 0:
print(f"'{_word}' is not exists.")
else:
print(f"Found {len(meanings)} meaning of '{_word}'")
w = word(_word, [])
for count, meaning in enumerate(meanings):
w.meanings.append(meaning.text)
print(f"{count + 1}. {meaning.text[:-1]}")
self._history.append(w)
# Commands
async def history(self, args):
if len(self._history) == 0:
print("History is clean.")
else:
largs = len(args)
output = None
if largs == 0:
for word in self._history:
msg = f"'{word.name}' has {word.len()} meanings."
print(msg)
for c, m in enumerate(word.meanings):
print(c + 1, m)
print(len(msg) * "-")
elif largs == 1:
print(args)
for word in self._history:
if args[0] == word.name:
print(f"'{word.name}' has {word.len()} meanings.")
for c, m in enumerate(word.meanings):
print(c + 1, m)
output = True
print(output)
            elif largs == 2:
                found = False
                for word in self._history:
                    if args[0] == word.name:
                        found = True
                        max_meanings = int(args[1])
                        print(f"'{word.name}' has {word.len()} meanings.")
                        for c, m in enumerate(word.meanings):
                            if c < max_meanings:
                                print(c + 1, m)
                if not found:
                    print(f"{args[0]} not found.")
async def exit(self, args):
print("Exiting, bye...")
sys.exit(0)
async def help(self, args):
if len(self.commands) == 0:
print("There is no command to show.")
else:
print(f"------------- commands [{len(self.commands)}] -------------")
for count, cmd in enumerate(self.commands):
print(f"{count + 1}: '!{cmd}'")
print("----------------------------------------")
def detect_interrupts(sig, frame):
print("exit request.")
sys.exit(0)
signal.signal(signal.SIGINT, detect_interrupts)
if hasattr(signal, "SIGBREAK"):  # SIGBREAK is only available on Windows
    signal.signal(signal.SIGBREAK, detect_interrupts)
if __name__ == '__main__':
engine = tureng()
@engine.command
async def testcommand(self, args):
print("testcommand")
async def main():
if len(sys.argv) > 1:
await engine.search(sys.argv[1])
else:
print("type '!help' for seeing commands.")
while True:
word = str(input(">>> "))
await engine.search(word)
async def run_engine():
task = asyncio.create_task(main())
await task
asyncio.run(run_engine())
| 31.454976
| 113
| 0.508212
|
4b6e66594a7de1130ef6e55f4c3489bd8a5e5351
| 2,472
|
py
|
Python
|
demos/setup.py
|
istresearch/Dallinger
|
47e4967ded9e01edbc8c1ae7132c9ec30a87f116
|
[
"MIT"
] | null | null | null |
demos/setup.py
|
istresearch/Dallinger
|
47e4967ded9e01edbc8c1ae7132c9ec30a87f116
|
[
"MIT"
] | null | null | null |
demos/setup.py
|
istresearch/Dallinger
|
47e4967ded9e01edbc8c1ae7132c9ec30a87f116
|
[
"MIT"
] | 1
|
2019-02-07T14:16:39.000Z
|
2019-02-07T14:16:39.000Z
|
import os
import sys
from setuptools import setup, find_packages
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
setup_args = dict(
name='dlgr.demos',
version="4.0.0",
description='Demonstration experiments for Dallinger',
url='http://github.com/Dallinger/Dallinger',
maintainer='Jordan Suchow',
maintainer_email='suchow@berkeley.edu',
license='MIT',
keywords=['science', 'cultural evolution', 'experiments', 'psychology'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
packages=find_packages('.'),
package_dir={'': '.'},
namespace_packages=['dlgr'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
],
entry_points={
'dallinger.experiments': [
'Bartlett1932 = dlgr.demos.bartlett1932.experiment:Bartlett1932',
'TwentyFortyEight = dlgr.demos.twentyfortyeight.experiment:TwentyFortyEight',
'CoordinationChatroom = dlgr.demos.chatroom.experiment:CoordinationChatroom',
'ConcentrationGame = dlgr.demos.concentration.experiment:ConcentrationGame',
'FunctionLearning = dlgr.demos.function_learning.experiment:FunctionLearning',
'IteratedDrawing = dlgr.demos.iterated_drawing.experiment:IteratedDrawing',
'MCMCP = dlgr.demos.mcmcp.experiment:MCMCP',
'RogersExperiment = dlgr.demos.rogers.experiment:RogersExperiment',
'SheepMarket = dlgr.demos.sheep_market.experiment:SheepMarket',
'SnakeGame = dlgr.demos.snake.experiment:SnakeGame',
'VoxPopuli = dlgr.demos.vox_populi.experiment:VoxPopuli',
],
},
)
# Read in requirements.txt for dependencies.
setup_args['install_requires'] = install_requires = []
setup_args['dependency_links'] = dependency_links = []
with open('requirements.txt') as f:
for line in f.readlines():
req = line.strip()
if not req or req.startswith('#'):
continue
if req.startswith('-e '):
dependency_links.append(req[3:])
else:
install_requires.append(req)
setup(**setup_args)
| 36.352941
| 90
| 0.651294
|
c2a7dcf8c2e28dd602437c74a453bcb7a8b92ab6
| 1,691
|
py
|
Python
|
scripts/plot_Zprofiles.py
|
scbao/pysonic
|
b4ccaf49772d55f632a0995c411d1cc042d71903
|
[
"MIT"
] | null | null | null |
scripts/plot_Zprofiles.py
|
scbao/pysonic
|
b4ccaf49772d55f632a0995c411d1cc042d71903
|
[
"MIT"
] | null | null | null |
scripts/plot_Zprofiles.py
|
scbao/pysonic
|
b4ccaf49772d55f632a0995c411d1cc042d71903
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: Theo Lemaire
# @Email: theo.lemaire@epfl.ch
# @Date: 2020-08-17 15:29:27
# @Last Modified by: Theo Lemaire
# @Last Modified time: 2020-08-17 19:24:19
import logging
import numpy as np
import matplotlib.pyplot as plt
from PySONIC.core import Batch, BilayerSonophore, AcousticDrive
from PySONIC.utils import logger
logger.setLevel(logging.INFO)
def plotZProfiles(bls, drive, Qrange, mpi=False, ax=None):
queue = bls.simQueue([drive.f], [drive.A], Qrange)
batch = Batch(bls.getZlast, queue)
outputs = batch(mpi=mpi)
Zprofiles = np.array(outputs)
    t = np.linspace(0., 1. / drive.f, Zprofiles.shape[1])
add_legend = False
if ax is None:
fig, ax = plt.subplots()
add_legend = True
ax.set_title(drive.desc)
ax.set_xlabel('t (us)')
ax.set_ylabel('Z (nm)')
handles = []
for Z, Q in zip(Zprofiles, Qrange):
handles.append(ax.plot(t * 1e6, Z * 1e9, label=f'Qm = {Q * 1e5:.0f} nC/cm2'))
if add_legend:
ax.legend(loc=1, frameon=False)
else:
return handles
if __name__ == '__main__':
# Model
a = 32e-9 # m
Cm0 = 1e-2 # F/m2
Qm0 = 0. # C/m2
bls = BilayerSonophore(a, Cm0, Qm0)
# Stimulation parameters
freqs = np.array([20., 100., 500., 2500.]) * 1e3 # Hz
amps = np.array([10., 50., 100., 500., 1000.]) * 1e3 # Pa
# Charges
Qrange = np.linspace(0., 100., 6) * 1e-5 # C/m2
# Sims and plots
fig, axes = plt.subplots(freqs.size, amps.size)
for i, f in enumerate(freqs):
for j, A in enumerate(amps):
handles = plotZProfiles(bls, AcousticDrive(f, A), Qrange, ax=axes[i, j])
plt.show()
| 27.274194
| 85
| 0.607333
|
15fa2515f2706757730bc95f3598edc066d50f81
| 4,757
|
py
|
Python
|
data/eu-data/scripts/download_at.py
|
juhaodong/German-Covid-19
|
34316ec56f974f924070fa43ccfa44a9df830159
|
[
"MIT"
] | null | null | null |
data/eu-data/scripts/download_at.py
|
juhaodong/German-Covid-19
|
34316ec56f974f924070fa43ccfa44a9df830159
|
[
"MIT"
] | null | null | null |
data/eu-data/scripts/download_at.py
|
juhaodong/German-Covid-19
|
34316ec56f974f924070fa43ccfa44a9df830159
|
[
"MIT"
] | null | null | null |
import html
import logging
import os
import re
import dateutil
import pandas as pd
import requests
from lxml import etree
import lxml
from functools import reduce
from utils import _COLUMNS_ORDER, COVIDScrapper, DailyAggregator
logging.basicConfig()
logger = logging.getLogger("covid-eu-data.download.at")
AT_REPORT_URL = "https://www.sozialministerium.at/Informationen-zum-Coronavirus/Neuartiges-Coronavirus-(2019-nCov).html"
DAILY_FOLDER = os.path.join("dataset", "daily", "at")
AT_STATES = [
"Burgenland",
"Kärnten",
"Niederösterreich",
"Oberösterreich",
"Salzburg",
"Steiermark",
"Tirol",
"Vorarlberg",
"Wien"
]
class SARSCOV2AT(COVIDScrapper):
def __init__(self, url=None, daily_folder=None):
if url is None:
url = AT_REPORT_URL
if daily_folder is None:
daily_folder = DAILY_FOLDER
COVIDScrapper.__init__(self, url, country="AT", daily_folder=daily_folder)
def extract_table(self):
"""Load data table from web page
"""
doc = lxml.html.document_fromstring(self.req.text)
el = doc.xpath('.//div[@class="infobox"]')
if el:
paragraphs = [
"".join(i.xpath('.//text()')) for i in el[0].xpath('.//p')
]
cases_text = ""
recovered_text = ""
deaths_text = ""
for par in paragraphs:
if par.startswith("Bestätigte Fälle, "):
cases_text = par
cases_text = html.unescape(cases_text)
elif par.startswith("Genesene Personen, "):
recovered_text = par
recovered_text = html.unescape(recovered_text)
elif par.startswith("Todesfälle, "):
deaths_text = par
deaths_text = html.unescape(deaths_text)
else:
raise Exception("Could not find infobox")
re_cases = re.compile(r'\s(\w*?)\s\((\d+)\)')
re_deaths = re.compile(r'\s(\d+)\s\((\w+)\)')
cases = [i for i in re_cases.findall(cases_text) if i[0] in AT_STATES]
cases = [(s, v.replace('.','').replace(',','.')) for s,v in cases]
recovered = [
i for i in re_cases.findall(recovered_text) if i[0] in AT_STATES
]
recovered = [(s, v.replace('.','').replace(',','.')) for s,v in recovered]
deaths = [i for i in re_deaths.findall(deaths_text) if i[-1] in AT_STATES]
deaths = [(v.replace('.','').replace(',','.'), s) for v, s in deaths]
if not cases:
raise Exception("Could not find cases_text in webpage")
df_cases = pd.DataFrame(
cases, columns=["state", "cases"]
)
df_recovered = pd.DataFrame(
recovered, columns=["state", "recovered"]
)
df_deaths = pd.DataFrame(
deaths, columns=["deaths", "state"]
)
self.df = reduce(
lambda left,right: pd.merge(
left,right,on=['state'], how='outer'
), [df_cases, df_recovered, df_deaths]
)
self.df.fillna(0, inplace=True)
self.df["cases"] = self.df.cases.astype(int)
self.df["recovered"] = self.df.recovered.astype(int)
self.df["deaths"] = self.df.deaths.astype(int)
total = self.df[["cases", "recovered", "deaths"]].sum()
total["state"] = "sum"
self.df = self.df.append(
pd.DataFrame(
total
).T
)
logger.info("cases:\n", self.df)
def extract_datetime(self):
"""Get datetime of dataset
Aktuelle Situation Österreich 04.03.2020 / 17:45 Uhr
Stand, 10.03.2020, 08:00 Uhr
"""
doc = lxml.html.document_fromstring(self.req.text)
el = doc.xpath('.//div[@class="infobox"]')
if el:
text = "".join(
el[0].xpath('.//text()')
)
re_dt = re.compile(r'Bestätigte Fälle, Stand (\d{1,2}.\d{1,2}.\d{4}, \d{1,2}:\d{1,2}) Uhr:')
text = html.unescape(text)
dt_from_re = re_dt.findall(text)
if not dt_from_re:
raise Exception("Did not find datetime from webpage")
dt_from_re = dt_from_re[0].replace("/", "")
dt_from_re = dateutil.parser.parse(dt_from_re, dayfirst=True)
self.dt = dt_from_re
def post_processing(self):
self.df.sort_values(by="cases", inplace=True)
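# Illustrative sketch (not part of the original script): how the datetime
# regex used in extract_datetime() behaves on a snippet of the expected form.
# The date in the snippet is an arbitrary example value.
def _example_extract_datetime_regex():
    re_dt = re.compile(
        r'Bestätigte Fälle, Stand (\d{1,2}.\d{1,2}.\d{4}, \d{1,2}:\d{1,2}) Uhr:'
    )
    text = "Bestätigte Fälle, Stand 10.03.2020, 08:00 Uhr: 182 Fälle"
    found = re_dt.findall(text)  # ['10.03.2020, 08:00']
    return dateutil.parser.parse(found[0], dayfirst=True)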
if __name__ == "__main__":
cov_at = SARSCOV2AT()
cov_at.workflow()
print(cov_at.df)
da = DailyAggregator(
base_folder="dataset",
daily_folder=DAILY_FOLDER,
country="AT",
fill=False
)
da.workflow()
print("End of Game")
| 30.299363
| 120
| 0.559596
|
4940d942e71f24132d08bc3684eed14786403e7c
| 2,132
|
py
|
Python
|
api/environments/managers.py
|
ekampf/flagsmith
|
35d1944de9763f02de5d5d1793d5b29b7fe28993
|
[
"BSD-3-Clause"
] | null | null | null |
api/environments/managers.py
|
ekampf/flagsmith
|
35d1944de9763f02de5d5d1793d5b29b7fe28993
|
[
"BSD-3-Clause"
] | null | null | null |
api/environments/managers.py
|
ekampf/flagsmith
|
35d1944de9763f02de5d5d1793d5b29b7fe28993
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models import Manager, Prefetch
from features.models import FeatureSegment, FeatureState
from features.multivariate.models import MultivariateFeatureStateValue
class EnvironmentManager(Manager):
def filter_for_document_builder(self, *args, **kwargs):
return (
super(EnvironmentManager, self)
.select_related(
"project",
"project__organisation",
"mixpanel_config",
"segment_config",
"amplitude_config",
"heap_config",
)
.prefetch_related(
Prefetch(
"feature_states",
queryset=FeatureState.objects.select_related(
"feature", "feature_state_value"
),
),
Prefetch(
"feature_states__multivariate_feature_state_values",
queryset=MultivariateFeatureStateValue.objects.select_related(
"multivariate_feature_option"
),
),
"project__segments",
"project__segments__rules",
"project__segments__rules__rules",
"project__segments__rules__conditions",
Prefetch(
"project__segments__feature_segments",
queryset=FeatureSegment.objects.select_related("segment"),
),
Prefetch(
"project__segments__feature_segments__feature_states",
queryset=FeatureState.objects.select_related(
"feature", "feature_state_value"
),
),
Prefetch(
"project__segments__feature_segments__feature_states__multivariate_feature_state_values",
queryset=MultivariateFeatureStateValue.objects.select_related(
"multivariate_feature_option"
),
),
)
.filter(*args, **kwargs)
)
| 38.763636
| 109
| 0.529081
|
0c3778ff4d081d1f9a7857429508e57173a5534b
| 2,845
|
py
|
Python
|
Resources/books/deep_learning_time_series_forecasting/code/chapter_22/08_plot_activity_durations.py
|
gdepalma93/bright-athlete-academy
|
54ba0cc6633637c1bd6d90120153e04b981244bf
|
[
"MIT"
] | null | null | null |
Resources/books/deep_learning_time_series_forecasting/code/chapter_22/08_plot_activity_durations.py
|
gdepalma93/bright-athlete-academy
|
54ba0cc6633637c1bd6d90120153e04b981244bf
|
[
"MIT"
] | null | null | null |
Resources/books/deep_learning_time_series_forecasting/code/chapter_22/08_plot_activity_durations.py
|
gdepalma93/bright-athlete-academy
|
54ba0cc6633637c1bd6d90120153e04b981244bf
|
[
"MIT"
] | null | null | null |
# plot durations of each activity by subject from the har dataset
from numpy import dstack
from numpy import unique
from pandas import read_csv
from matplotlib import pyplot
# load a single file as a numpy array
def load_file(filepath):
dataframe = read_csv(filepath, header=None, delim_whitespace=True)
return dataframe.values
# load a list of files, such as x, y, z data for a given variable
def load_group(filenames, prefix=''):
loaded = list()
for name in filenames:
data = load_file(prefix + name)
loaded.append(data)
# stack group so that features are the 3rd dimension
loaded = dstack(loaded)
return loaded
# load a dataset group, such as train or test
def load_dataset(group, prefix=''):
filepath = prefix + group + '/Inertial Signals/'
# load all 9 files as a single array
filenames = list()
# total acceleration
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt']
# body acceleration
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt']
# body gyroscope
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt']
# load input data
X = load_group(filenames, filepath)
# load class output
y = load_file(prefix + group + '/y_'+group+'.txt')
return X, y
# get all data for one subject
def data_for_subject(X, y, sub_map, sub_id):
# get row indexes for the subject id
ix = [i for i in range(len(sub_map)) if sub_map[i]==sub_id]
# return the selected samples
return X[ix, :, :], y[ix]
# convert a series of windows to a 1D list
def to_series(windows):
series = list()
for window in windows:
# remove the overlap from the window
half = int(len(window) / 2) - 1
for value in window[-half:]:
series.append(value)
return series
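# illustrative example (not part of the original script): HAR windows overlap
# by 50%, so to_series() keeps only the last len(window)//2 - 1 values of each
# window when flattening, e.g. to_series([[1, 2, 3, 4], [3, 4, 5, 6]]) -> [4, 6]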
# group data by activity
def data_by_activity(X, y, activities):
# group windows by activity
return {a:X[y[:,0]==a, :, :] for a in activities}
# plot activity durations by subject
def plot_activity_durations_by_subject(X, y, sub_map):
# get unique subjects and activities
subject_ids = unique(sub_map[:,0])
activity_ids = unique(y[:,0])
# enumerate subjects
activity_windows = {a:list() for a in activity_ids}
for sub_id in subject_ids:
# get data for one subject
_, subj_y = data_for_subject(X, y, sub_map, sub_id)
# count windows by activity
for a in activity_ids:
activity_windows[a].append(len(subj_y[subj_y[:,0]==a]))
# organize durations into a list of lists
durations = [activity_windows[a] for a in activity_ids]
pyplot.boxplot(durations, labels=activity_ids)
pyplot.show()
# load training dataset
X, y = load_dataset('train', 'HARDataset/')
# load mapping of rows to subjects
sub_map = load_file('HARDataset/train/subject_train.txt')
# plot durations
plot_activity_durations_by_subject(X, y, sub_map)
| 33.869048
| 101
| 0.726889
|
2cc0eeef8811bd7130e89bb515981181446d0ff6
| 8,732
|
py
|
Python
|
b2sdk/file_version.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
b2sdk/file_version.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
b2sdk/file_version.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
######################################################################
#
# File: b2sdk/file_version.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from typing import Optional
from .encryption.setting import EncryptionSetting, EncryptionSettingFactory
from .file_lock import FileRetentionSetting, LegalHold
class FileVersionInfo(object):
"""
A structure which represents a version of a file (in B2 cloud).
:ivar str ~.id\_: ``fileId``
:ivar str ~.file_name: full file name (with path)
:ivar ~.size: size in bytes, can be ``None`` (unknown)
:vartype ~.size: int or None
:ivar str ~.content_type: RFC 822 content type, for example ``"application/octet-stream"``
:ivar ~.content_sha1: sha1 checksum of the entire file, can be ``None`` (unknown) if it is a large file uploaded by a client which did not provide it
:vartype ~.content_sha1: str or None
:ivar ~.content_md5: md5 checksum of the file, can be ``None`` (unknown)
:vartype ~.content_md5: str or None
:ivar dict ~.file_info: file info dict
:ivar ~.upload_timestamp: in milliseconds since :abbr:`epoch (1970-01-01 00:00:00)`. Can be ``None`` (unknown).
:vartype ~.upload_timestamp: int or None
:ivar str ~.action: ``"upload"``, ``"hide"`` or ``"delete"``
"""
__slots__ = [
'id_',
'file_name',
'size',
'content_type',
'content_sha1',
'content_md5',
'file_info',
'upload_timestamp',
'action',
'server_side_encryption',
'legal_hold',
'file_retention',
]
def __init__(
self,
id_,
file_name,
size,
content_type,
content_sha1,
file_info,
upload_timestamp,
action,
content_md5=None,
server_side_encryption: Optional[EncryptionSetting] = None, # TODO: make it mandatory in v2
file_retention: Optional[
FileRetentionSetting
] = None, # TODO: in v2 change the default value to NO_RETENTION_FILE_SETTING
legal_hold: Optional[LegalHold
] = None, # TODO: in v2 change the default value to LegalHold.UNSET
):
self.id_ = id_
self.file_name = file_name
self.size = size
self.content_type = content_type
self.content_sha1 = content_sha1
self.content_md5 = content_md5
self.file_info = file_info or {}
self.upload_timestamp = upload_timestamp
self.action = action
self.server_side_encryption = server_side_encryption
self.legal_hold = legal_hold
self.file_retention = file_retention
def as_dict(self):
""" represents the object as a dict which looks almost exactly like the raw api output for upload/list """
result = {
'fileId': self.id_,
'fileName': self.file_name,
'fileInfo': self.file_info,
'legalHold': self.legal_hold.to_dict_repr() if self.legal_hold is not None else None,
}
if self.size is not None:
result['size'] = self.size
if self.upload_timestamp is not None:
result['uploadTimestamp'] = self.upload_timestamp
if self.action is not None:
result['action'] = self.action
if self.content_type is not None:
result['contentType'] = self.content_type
if self.content_sha1 is not None:
result['contentSha1'] = self.content_sha1
if self.content_md5 is not None:
result['contentMd5'] = self.content_md5
if self.server_side_encryption is not None: # this is for backward compatibility of interface only, b2sdk always sets it
result['serverSideEncryption'] = self.server_side_encryption.as_dict()
if self.file_retention is not None: # this is for backward compatibility of interface only, b2sdk always sets it
result['fileRetention'] = self.file_retention.as_dict()
return result
def __eq__(self, other):
sentry = object()
for attr in self.__slots__:
if getattr(self, attr) != getattr(other, attr, sentry):
return False
return True
class FileVersionInfoFactory(object):
"""
Construct :py:class:`b2sdk.v1.FileVersionInfo` objects from various structures.
"""
@classmethod
def from_api_response(cls, file_info_dict, force_action=None):
"""
Turn this:
.. code-block:: python
{
"action": "hide",
"fileId": "4_zBucketName_f103b7ca31313c69c_d20151230_m030117_c001_v0001015_t0000",
"fileName": "randomdata",
"size": 0,
"uploadTimestamp": 1451444477000
}
or this:
.. code-block:: python
{
"accountId": "4aa9865d6f00",
"bucketId": "547a2a395826655d561f0010",
"contentLength": 1350,
"contentSha1": "753ca1c2d0f3e8748320b38f5da057767029a036",
"contentType": "application/octet-stream",
"fileId": "4_z547a2a395826655d561f0010_f106d4ca95f8b5b78_d20160104_m003906_c001_v0001013_t0005",
"fileInfo": {},
"fileName": "randomdata",
"serverSideEncryption": {"algorithm": "AES256", "mode": "SSE-B2"}
}
into a :py:class:`b2sdk.v1.FileVersionInfo` object.
"""
assert file_info_dict.get('action') is None or force_action is None, \
'action was provided by both info_dict and function argument'
action = file_info_dict.get('action') or force_action
file_name = file_info_dict['fileName']
id_ = file_info_dict['fileId']
if 'size' in file_info_dict:
size = file_info_dict['size']
elif 'contentLength' in file_info_dict:
size = file_info_dict['contentLength']
else:
raise ValueError('no size or contentLength')
upload_timestamp = file_info_dict.get('uploadTimestamp')
content_type = file_info_dict.get('contentType')
content_sha1 = file_info_dict.get('contentSha1')
content_md5 = file_info_dict.get('contentMd5')
file_info = file_info_dict.get('fileInfo')
server_side_encryption = EncryptionSettingFactory.from_file_version_dict(file_info_dict)
file_retention = FileRetentionSetting.from_file_version_dict(file_info_dict)
legal_hold = LegalHold.from_file_version_dict(file_info_dict)
return FileVersionInfo(
id_,
file_name,
size,
content_type,
content_sha1,
file_info,
upload_timestamp,
action,
content_md5,
server_side_encryption,
file_retention,
legal_hold,
)
@classmethod
def from_cancel_large_file_response(cls, response):
return FileVersionInfo(
response['fileId'],
response['fileName'],
0, # size
'unknown',
'none',
{},
0, # upload timestamp
'cancel'
)
@classmethod
def from_response_headers(cls, headers):
return FileVersionInfo(
id_=headers.get('x-bz-file-id'),
file_name=headers.get('x-bz-file-name'),
size=headers.get('content-length'),
content_type=headers.get('content-type'),
content_sha1=headers.get('x-bz-content-sha1'),
file_info=None,
upload_timestamp=headers.get('x-bz-upload-timestamp'),
action=None,
server_side_encryption=EncryptionSettingFactory.from_response_headers(headers),
file_retention=FileRetentionSetting.from_response_headers(headers),
legal_hold=LegalHold.from_response_headers(headers),
)
class FileIdAndName(object):
"""
A structure which represents a B2 cloud file with just `file_name` and `fileId` attributes.
Used to return data from calls to :py:meth:`b2sdk.v1.Bucket.delete_file_version`.
:ivar str ~.file_id: ``fileId``
:ivar str ~.file_name: full file name (with path)
"""
def __init__(self, file_id, file_name):
self.file_id = file_id
self.file_name = file_name
def as_dict(self):
""" represents the object as a dict which looks almost exactly like the raw api output for delete_file_version """
return {'action': 'delete', 'fileId': self.file_id, 'fileName': self.file_name}
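# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the classes above fit together, assuming the
# definitions in this module are available as-is. The sample values come from
# the from_api_response() docstring; encryption/retention details are omitted,
# so the optional arguments keep their defaults.
def _example_file_version_round_trip():
    file_version = FileVersionInfo(
        id_='4_zBucketName_f103b7ca31313c69c_d20151230_m030117_c001_v0001015_t0000',
        file_name='randomdata',
        size=0,
        content_type=None,
        content_sha1=None,
        file_info={},
        upload_timestamp=1451444477000,
        action='hide',
    )
    # as_dict() reproduces something close to the raw API listing output.
    as_api_dict = file_version.as_dict()
    assert as_api_dict['fileName'] == 'randomdata'
    assert as_api_dict['action'] == 'hide'
    return as_api_dict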
| 36.535565
| 153
| 0.608566
|
736714689e3f0a851c5037a9363a538be2e070da
| 8,455
|
py
|
Python
|
3_core_core_analysis/4_find_related_acc_genes.py
|
greenelab/core-accessory-interactome
|
98e3f9a3036373fc5734181832f11852a75aa914
|
[
"BSD-3-Clause"
] | null | null | null |
3_core_core_analysis/4_find_related_acc_genes.py
|
greenelab/core-accessory-interactome
|
98e3f9a3036373fc5734181832f11852a75aa914
|
[
"BSD-3-Clause"
] | 33
|
2020-04-24T23:07:49.000Z
|
2022-03-10T22:53:09.000Z
|
3_core_core_analysis/4_find_related_acc_genes.py
|
greenelab/core-accessory-interactome
|
98e3f9a3036373fc5734181832f11852a75aa914
|
[
"BSD-3-Clause"
] | 1
|
2020-04-01T17:09:27.000Z
|
2020-04-01T17:09:27.000Z
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:core_acc] *
# language: python
# name: conda-env-core_acc-py
# ---
# # Find co-expressed accessory genes
#
# Later we analyze core genes in relation to accessory genes. Therefore, this notebook reports the accessory genes that the core genes are most co-expressed with.
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
import random
import scipy
import pandas as pd
import seaborn as sns
import numpy as np
from scripts import utils, paths, gene_relationships, annotations
random.seed(1)
# -
# User params
use_operon = True
# ### Load correlation matrix
# +
# Load correlation matrix
pao1_corr_filename = paths.PAO1_CORR_LOG_SPELL
pa14_corr_filename = paths.PA14_CORR_LOG_SPELL
pao1_corr = pd.read_csv(pao1_corr_filename, sep="\t", index_col=0, header=0)
pa14_corr = pd.read_csv(pa14_corr_filename, sep="\t", index_col=0, header=0)
# -
# Make a dataframe with gene ids
pao1_membership = pd.DataFrame(data=[], index=pao1_corr.index)
print(pao1_membership.shape)
pao1_membership.head()
pa14_membership = pd.DataFrame(data=[], index=pa14_corr.index)
print(pa14_membership.shape)
pa14_membership.head()
# ### Load and get least stable core genes
# +
# Load transcriptional similarity df
# These are the subset of genes that we will consider
pao1_similarity_scores_filename = "pao1_similarity_scores_spell.tsv"
pa14_similarity_scores_filename = "pa14_similarity_scores_spell.tsv"
pao1_similarity_scores = pd.read_csv(
pao1_similarity_scores_filename, sep="\t", header=0, index_col=0
)
pa14_similarity_scores = pd.read_csv(
pa14_similarity_scores_filename, sep="\t", header=0, index_col=0
)
# +
# Get least stable core genes
pao1_least_stable_genes = list(
pao1_similarity_scores[pao1_similarity_scores["label"] == "least stable"].index
)
pa14_least_stable_genes = list(
pa14_similarity_scores[pa14_similarity_scores["label"] == "least stable"].index
)
# -
# ### Load core/accessory gene labels
# +
# Read in expression data
pao1_expression_filename = paths.PAO1_COMPENDIUM
pa14_expression_filename = paths.PA14_COMPENDIUM
pao1_expression = pd.read_csv(pao1_expression_filename, sep="\t", index_col=0, header=0)
pa14_expression = pd.read_csv(pa14_expression_filename, sep="\t", index_col=0, header=0)
# +
pao1_annot_filename = paths.GENE_PAO1_ANNOT
pa14_annot_filename = paths.GENE_PA14_ANNOT
core_acc_dict = utils.get_my_core_acc_genes(
pao1_annot_filename, pa14_annot_filename, pao1_expression, pa14_expression
)
# -
pao1_core = core_acc_dict["core_pao1"]
pa14_core = core_acc_dict["core_pa14"]
pao1_acc = core_acc_dict["acc_pao1"]
pa14_acc = core_acc_dict["acc_pa14"]
pao1_membership.loc[pao1_core, "core/acc"] = "core"
pao1_membership.loc[pao1_acc, "core/acc"] = "acc"
# pa14_acc_shared = set(pa14_acc).intersection(pa14_gene_module_labels.index)
pa14_membership.loc[pa14_core, "core/acc"] = "core"
pa14_membership.loc[pa14_acc, "core/acc"] = "acc"
# Use the membership dataframes as-is (no module id column to drop here)
pao1_arr = pao1_membership
pa14_arr = pa14_membership
# Make sure to sort by gene id
# NOTE: PA14 gene ids don't increment by 1 but by 10 or 20; are we missing some genes?
pao1_arr = pao1_arr.sort_index()
pa14_arr = pa14_arr.sort_index()
# ### Load operon annotations
pao1_operon_filename = paths.PAO1_OPERON
pa14_operon_filename = paths.PA14_OPERON
pao1_operon = annotations.load_format_operons(pao1_operon_filename)
pa14_operon = annotations.load_format_operons(pa14_operon_filename)
if use_operon:
pao1_operon_expression_to_use = pao1_operon
pa14_operon_expression_to_use = pa14_operon
else:
pao1_operon_expression_to_use = None
pa14_operon_expression_to_use = None
# ### Find all accessory genes co-expressed with the lasR gene
#
# Our collaborator shared an experiment comparing the transcriptomes of WT vs lasR mutant strains and found PA14-only genes that were significantly differentially expressed. So we were curious to see whether any of these PA14-only DEGs (the genes highlighted in red that were provided) were co-regulated by lasR according to our analysis.
pa14_lasR_acc_relationships = gene_relationships.find_related_acc_genes(
pa14_corr,
["PA14_45960"],
pa14_arr,
40,
pa14_operon_expression_to_use,
)
print(pa14_lasR_acc_relationships.shape)
pa14_lasR_acc_relationships.head()
# Read in PA14-only lasR genes
pa14_lasR_genes_Deb_filename = os.path.join(
paths.LOCAL_DATA_DIR, "lasR regulated PA14 genes for Alex.csv"
)
pa14_lasR_genes_Deb = pd.read_csv(pa14_lasR_genes_Deb_filename, index_col=0, header=1)
print(pa14_lasR_genes_Deb.shape)
pa14_lasR_genes_Deb.head()
print(len(list(pa14_lasR_genes_Deb.index)))
shared_genes = set(pa14_lasR_genes_Deb.index).intersection(pa14_corr.index)
print(len(shared_genes))
# Format df for plotting
lasR_corr = pa14_corr.loc["PA14_45960"].to_frame("values")
lasR_corr["label"] = ""
lasR_corr.loc[pa14_acc, "label"] = "acc"
lasR_corr.loc[shared_genes, "label"] = "experiment DEGs"
lasR_corr.head()
# Plot distribution of correlation scores
fig_pao1 = sns.displot(
data=lasR_corr,
x="values",
hue="label",
alpha=0.3,
bins=np.linspace(-0.4, 1.0, 50),
)
# Let's try looking at genes within the same module as lasR
cluster_method = "affinity"
gene_subset = "all"
processed = "raw"
pa14_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR, f"pa14_modules_{cluster_method}_{gene_subset}_{processed}.tsv"
)
pa14_membership = pd.read_csv(pa14_membership_filename, sep="\t", index_col=0, header=0)
pa14_membership.head()
lasR_module_id = pa14_membership.loc["PA14_45960"].values[0]
print(lasR_module_id)
# Get genes in the same module as lasR
lasR_coexpression_module_genes = pa14_membership[
pa14_membership["module id"] == lasR_module_id
].index
# Compare gene lists
set(pa14_lasR_genes_Deb.index).intersection(lasR_coexpression_module_genes)
# If we look at the distribution of co-expression scores between lasR and all genes, accessory genes, and Deb’s genes, the correlation scores range from (-0.2, 0.2), which is lower than for other core genes. The lasR accessory (PA14-only) genes start appearing within the top 40 most co-expressed genes.
#
# If we cluster the correlation matrix that includes all genes (core and accessory), there are 0 genes that are both within the lasR cluster and found in Deb’s genes (highlighted ones only, ~80 genes).
#
# ### Find all accessory genes co-expressed with least stable core genes
# %%time
pao1_least_acc_relationships = gene_relationships.find_related_acc_genes(
pao1_corr,
pao1_least_stable_genes,
pao1_arr,
10,
pao1_operon_expression_to_use,
)
pao1_least_acc_relationships.head()
# %%time
pa14_least_acc_relationships = gene_relationships.find_related_acc_genes(
pa14_corr,
pa14_least_stable_genes,
pa14_arr,
10,
pa14_operon_expression_to_use,
)
pa14_least_acc_relationships.head()
# ### Add accessory gene list to core-core annotation df
# Load current core-core annotations
pao1_core_stable_similarity_filename = "pao1_core_similarity_associations_spell.tsv"
pa14_core_stable_similarity_filename = "pa14_core_similarity_associations_spell.tsv"
pao1_all_associations = pd.read_csv(
pao1_core_stable_similarity_filename, sep="\t", header=0, index_col=0
)
pa14_all_associations = pd.read_csv(
pa14_core_stable_similarity_filename, sep="\t", header=0, index_col=0
)
print(pao1_all_associations.shape)
print(pa14_all_associations.shape)
# Merge KEGG associations with transcriptional similarity information
pao1_all_associations = pao1_all_associations.merge(
pao1_least_acc_relationships, left_index=True, right_index=True, how="left"
)
pa14_all_associations = pa14_all_associations.merge(
pa14_least_acc_relationships, left_index=True, right_index=True, how="left"
)
# Check that the dimension is consistent before and after merge
print(pao1_all_associations.shape)
print(pa14_all_associations.shape)
pao1_all_associations.head()
pao1_all_associations.sort_values(by="label").head()
pa14_all_associations.sort_values(by="label").head()
# Save
pao1_all_associations.to_csv(
"pao1_core_similarity_associations_final_spell.tsv", sep="\t"
)
pa14_all_associations.to_csv(
"pa14_core_similarity_associations_final_spell.tsv", sep="\t"
)
| 30.196429
| 310
| 0.779066
|
1859560dc4d26fcb16f09a80a40bc599dbf35cf2
| 9,397
|
py
|
Python
|
tests/test_asgi.py
|
artcg/sanic
|
8a2ea626c6d04a5eb1e28d071ffa56bf9ad98a12
|
[
"MIT"
] | null | null | null |
tests/test_asgi.py
|
artcg/sanic
|
8a2ea626c6d04a5eb1e28d071ffa56bf9ad98a12
|
[
"MIT"
] | 1
|
2021-03-01T18:55:22.000Z
|
2021-03-01T18:55:22.000Z
|
tests/test_asgi.py
|
angrycaptain19/sanic
|
27f64ddae2bd5a3ce40387f493d3e1b1068cdac7
|
[
"MIT"
] | null | null | null |
import asyncio
import sys
from collections import deque, namedtuple
import pytest
import uvicorn
from sanic import Sanic
from sanic.asgi import MockTransport
from sanic.exceptions import InvalidUsage
from sanic.request import Request
from sanic.response import json, text
from sanic.websocket import WebSocketConnection
@pytest.fixture
def message_stack():
return deque()
@pytest.fixture
def receive(message_stack):
async def _receive():
return message_stack.popleft()
return _receive
@pytest.fixture
def send(message_stack):
async def _send(message):
message_stack.append(message)
return _send
@pytest.fixture
def transport(message_stack, receive, send):
return MockTransport({}, receive, send)
@pytest.fixture
def protocol(transport):
return transport.get_protocol()
def test_listeners_triggered():
app = Sanic("app")
before_server_start = False
after_server_start = False
before_server_stop = False
after_server_stop = False
@app.listener("before_server_start")
def do_before_server_start(*args, **kwargs):
nonlocal before_server_start
before_server_start = True
@app.listener("after_server_start")
def do_after_server_start(*args, **kwargs):
nonlocal after_server_start
after_server_start = True
@app.listener("before_server_stop")
def do_before_server_stop(*args, **kwargs):
nonlocal before_server_stop
before_server_stop = True
@app.listener("after_server_stop")
def do_after_server_stop(*args, **kwargs):
nonlocal after_server_stop
after_server_stop = True
@app.route("/")
def handler(request):
return text("...")
class CustomServer(uvicorn.Server):
def install_signal_handlers(self):
pass
config = uvicorn.Config(app=app, loop="asyncio", limit_max_requests=0)
server = CustomServer(config=config)
with pytest.warns(UserWarning):
server.run()
all_tasks = asyncio.all_tasks(asyncio.get_event_loop())
for task in all_tasks:
task.cancel()
assert before_server_start
assert after_server_start
assert before_server_stop
assert after_server_stop
def test_listeners_triggered_async(app):
before_server_start = False
after_server_start = False
before_server_stop = False
after_server_stop = False
@app.listener("before_server_start")
async def do_before_server_start(*args, **kwargs):
nonlocal before_server_start
before_server_start = True
@app.listener("after_server_start")
async def do_after_server_start(*args, **kwargs):
nonlocal after_server_start
after_server_start = True
@app.listener("before_server_stop")
async def do_before_server_stop(*args, **kwargs):
nonlocal before_server_stop
before_server_stop = True
@app.listener("after_server_stop")
async def do_after_server_stop(*args, **kwargs):
nonlocal after_server_stop
after_server_stop = True
@app.route("/")
def handler(request):
return text("...")
class CustomServer(uvicorn.Server):
def install_signal_handlers(self):
pass
config = uvicorn.Config(app=app, loop="asyncio", limit_max_requests=0)
server = CustomServer(config=config)
with pytest.warns(UserWarning):
server.run()
all_tasks = asyncio.all_tasks(asyncio.get_event_loop())
for task in all_tasks:
task.cancel()
assert before_server_start
assert after_server_start
assert before_server_stop
assert after_server_stop
@pytest.mark.asyncio
async def test_mockprotocol_events(protocol):
assert protocol._not_paused.is_set()
protocol.pause_writing()
assert not protocol._not_paused.is_set()
protocol.resume_writing()
assert protocol._not_paused.is_set()
@pytest.mark.asyncio
async def test_protocol_push_data(protocol, message_stack):
text = b"hello"
await protocol.push_data(text)
await protocol.complete()
assert len(message_stack) == 2
message = message_stack.popleft()
assert message["type"] == "http.response.body"
assert message["more_body"]
assert message["body"] == text
message = message_stack.popleft()
assert message["type"] == "http.response.body"
assert not message["more_body"]
assert message["body"] == b""
@pytest.mark.asyncio
async def test_websocket_send(send, receive, message_stack):
text_string = "hello"
text_bytes = b"hello"
ws = WebSocketConnection(send, receive)
await ws.send(text_string)
await ws.send(text_bytes)
assert len(message_stack) == 2
message = message_stack.popleft()
assert message["type"] == "websocket.send"
assert message["text"] == text_string
assert "bytes" not in message
message = message_stack.popleft()
assert message["type"] == "websocket.send"
assert message["bytes"] == text_bytes
assert "text" not in message
@pytest.mark.asyncio
async def test_websocket_receive(send, receive, message_stack):
msg = {"text": "hello", "type": "websocket.receive"}
message_stack.append(msg)
ws = WebSocketConnection(send, receive)
text = await ws.receive()
assert text == msg["text"]
@pytest.mark.asyncio
async def test_websocket_accept_with_no_subprotocols(
send, receive, message_stack
):
ws = WebSocketConnection(send, receive)
await ws.accept()
assert len(message_stack) == 1
message = message_stack.popleft()
assert message["type"] == "websocket.accept"
assert message["subprotocol"] == ""
assert "bytes" not in message
@pytest.mark.asyncio
async def test_websocket_accept_with_subprotocol(send, receive, message_stack):
subprotocols = ["graphql-ws"]
ws = WebSocketConnection(send, receive, subprotocols)
await ws.accept()
assert len(message_stack) == 1
message = message_stack.popleft()
assert message["type"] == "websocket.accept"
assert message["subprotocol"] == "graphql-ws"
assert "bytes" not in message
@pytest.mark.asyncio
async def test_websocket_accept_with_multiple_subprotocols(
send, receive, message_stack
):
subprotocols = ["graphql-ws", "hello", "world"]
ws = WebSocketConnection(send, receive, subprotocols)
await ws.accept()
assert len(message_stack) == 1
message = message_stack.popleft()
assert message["type"] == "websocket.accept"
assert message["subprotocol"] == "graphql-ws,hello,world"
assert "bytes" not in message
def test_improper_websocket_connection(transport, send, receive):
with pytest.raises(InvalidUsage):
transport.get_websocket_connection()
transport.create_websocket_connection(send, receive)
connection = transport.get_websocket_connection()
assert isinstance(connection, WebSocketConnection)
@pytest.mark.asyncio
async def test_request_class_regular(app):
@app.get("/regular")
def regular_request(request):
return text(request.__class__.__name__)
_, response = await app.asgi_client.get("/regular")
assert response.body == b"Request"
@pytest.mark.asyncio
async def test_request_class_custom():
class MyCustomRequest(Request):
pass
app = Sanic(name=__name__, request_class=MyCustomRequest)
@app.get("/custom")
def custom_request(request):
return text(request.__class__.__name__)
_, response = await app.asgi_client.get("/custom")
assert response.body == b"MyCustomRequest"
@pytest.mark.asyncio
async def test_cookie_customization(app):
@app.get("/cookie")
def get_cookie(request):
response = text("There's a cookie up in this response")
response.cookies["test"] = "Cookie1"
response.cookies["test"]["httponly"] = True
response.cookies["c2"] = "Cookie2"
response.cookies["c2"]["httponly"] = False
return response
_, response = await app.asgi_client.get("/cookie")
CookieDef = namedtuple("CookieDef", ("value", "httponly"))
Cookie = namedtuple("Cookie", ("domain", "path", "value", "httponly"))
cookie_map = {
"test": CookieDef("Cookie1", True),
"c2": CookieDef("Cookie2", False),
}
cookies = {
c.name: Cookie(c.domain, c.path, c.value, "HttpOnly" in c._rest.keys())
for c in response.cookies.jar
}
for name, definition in cookie_map.items():
cookie = cookies.get(name)
assert cookie
assert cookie.value == definition.value
assert cookie.domain == "mockserver.local"
assert cookie.path == "/"
assert cookie.httponly == definition.httponly
@pytest.mark.asyncio
async def test_content_type(app):
@app.get("/json")
def send_json(request):
return json({"foo": "bar"})
@app.get("/text")
def send_text(request):
return text("foobar")
@app.get("/custom")
def send_custom(request):
return text("foobar", content_type="somethingelse")
_, response = await app.asgi_client.get("/json")
assert response.headers.get("content-type") == "application/json"
_, response = await app.asgi_client.get("/text")
assert response.headers.get("content-type") == "text/plain; charset=utf-8"
_, response = await app.asgi_client.get("/custom")
assert response.headers.get("content-type") == "somethingelse"
| 26.848571
| 79
| 0.693306
|
3531e98dff8912e28799ee5062ae2b66d439a15e
| 3,027
|
py
|
Python
|
filter.py
|
cueo/gmail-api-wrapper
|
56de7c9d978a3e538741e42d46684d044820dde6
|
[
"MIT"
] | null | null | null |
filter.py
|
cueo/gmail-api-wrapper
|
56de7c9d978a3e538741e42d46684d044820dde6
|
[
"MIT"
] | null | null | null |
filter.py
|
cueo/gmail-api-wrapper
|
56de7c9d978a3e538741e42d46684d044820dde6
|
[
"MIT"
] | null | null | null |
from label import Label
class Filter:
def __init__(self, service):
self.service = service
self.label_service = Label(service)
self.filters = None
def all_filters(self, cached=True):
if not cached or self.filters is None:
self.filters = self.service.users().settings().filters().list(userId='me').execute()
return self.filters
def create_filter(self, add_labels, remove_labels, senders):
labels = add_labels + remove_labels
label_ids = self.label_service.get_label_ids(labels)
print('Label and label ids:', label_ids)
from_emails = ' OR '.join(senders)
add_label_ids = [label_ids[label_id] for label_id in add_labels]
remove_label_ids = [label_ids[label_id] for label_id in remove_labels]
print('Setting filter for from=%s add_labels=%s remove_labels=%s'
% (from_emails, add_label_ids, remove_label_ids))
new_filter = {
'criteria': {
'from': from_emails
},
'action': {
'addLabelIds': add_label_ids,
'removeLabelIds': remove_label_ids
}
}
result = self._create_filter(new_filter)
print('Created filter: %s' % result['id'])
return result
def update_filter(self, label, sender):
"""
Update filter to add sender to the filter for the given label.
Args:
label: label to add for the sender's mails
sender: from address
"""
label_id = self.label_service.get_label_id(label)
print('Label id: %s' % label_id)
filter_object = self.get_filter(label_id)
if filter_object is None:
print('Filter not found for the label: %s' % label)
return
senders = filter_object['criteria']['from']
if sender in senders:
print('Filter already contains %s' % sender)
return
senders += ' OR %s' % sender
filter_object['criteria']['from'] = senders
self.delete_filter(filter_object['id'])
result = self._create_filter(filter_object)
print('Created filter with id: %s' % result['id'])
return result
def _create_filter(self, filter_object):
return self.service.users().settings().filters().create(userId='me', body=filter_object).execute()
def get_filter(self, label_id):
filters = self.service.users().settings().filters().list(userId='me').execute()['filter']
# next(_filter for _filter in filters
# if 'addLabelIds' in _filter['action'] and _filter['action']['addLabelIds'] == label_id)
for _filter in filters:
if label_id in _filter.get('action', {}).get('addLabelIds', []):
return _filter
def delete_filter(self, filter_id):
result = self.service.users().settings().filters().delete(userId='me', id=filter_id).execute()
print('Deleted filter:', result)
return result
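# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how Filter might be driven, assuming `service` is
# an already-authenticated Gmail API client (for example one built with
# googleapiclient.discovery.build('gmail', 'v1', credentials=creds)). The
# label names and sender addresses are hypothetical.
def example_usage(service):
    filters = Filter(service)
    # Route mail from either sender to "Newsletters" and skip the inbox.
    filters.create_filter(
        add_labels=['Newsletters'],
        remove_labels=['INBOX'],
        senders=['news@example.com', 'digest@example.com'],
    )
    # Later, add one more sender to the existing "Newsletters" filter.
    filters.update_filter('Newsletters', 'weekly@example.com')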
| 39.311688
| 106
| 0.608523
|
955f82fa432e82ab1b95f9ecddaca5537a9eca11
| 626
|
py
|
Python
|
sympy/series/tests/test_kauers.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 7
|
2015-01-14T06:55:33.000Z
|
2018-08-11T14:43:52.000Z
|
sympy/series/tests/test_kauers.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 1
|
2018-02-19T04:56:04.000Z
|
2018-02-19T04:56:04.000Z
|
sympy/series/tests/test_kauers.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 1
|
2016-04-24T14:39:22.000Z
|
2016-04-24T14:39:22.000Z
|
from sympy.series.kauers import finite_diff
from sympy.abc import x, y, z, w, n
from sympy import sin, cos
from sympy import pi
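# Judging from the assertions below, finite_diff(expr, var, step) returns the
# forward difference expr(var + step) - expr(var) in expanded form, with step
# defaulting to 1. For example, for x**2 + 2*x + 1 the difference
# ((x + 1)**2 + 2*(x + 1) + 1) - (x**2 + 2*x + 1) expands to 2*x + 3, matching
# the first assertion.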
def test_finite_diff():
assert finite_diff(x**2 + 2*x + 1, x) == 2*x + 3
assert finite_diff(y**3 + 2*y**2 + 3*y + 5, y) == 3*y**2 + 7*y + 6
assert finite_diff(z**2 - 2*z + 3, z) == 2*z - 1
assert finite_diff(w**2 + 3*w - 2, w) == 2*w + 4
assert finite_diff(sin(x), x, pi/6) == -sin(x) + sin(x + pi/6)
assert finite_diff(cos(y), y, pi/3) == -cos(y) + cos(y + pi/3)
assert finite_diff(x**2 - 2*x + 3, x, 2) == 4*x
assert finite_diff(n**2 - 2*n + 3, n, 3) == 6*n + 3
| 39.125
| 70
| 0.563898
|
6be5191ab973c6cf239e1be9d1671ec686d18c0a
| 15,908
|
py
|
Python
|
tests/test_library.py
|
ralfstx/mopidy-kitchen
|
5b0f5eb1ca64ae0a2209e49f0a48906f23e81896
|
[
"MIT"
] | null | null | null |
tests/test_library.py
|
ralfstx/mopidy-kitchen
|
5b0f5eb1ca64ae0a2209e49f0a48906f23e81896
|
[
"MIT"
] | null | null | null |
tests/test_library.py
|
ralfstx/mopidy-kitchen
|
5b0f5eb1ca64ae0a2209e49f0a48906f23e81896
|
[
"MIT"
] | null | null | null |
import logging
from mopidy.models import Album, Image, Ref, SearchResult, Track
from mopidy_kitchen.library import KitchenLibraryProvider
from mopidy_kitchen.uri import AlbumsUri, parse_uri
from .helpers import EXAMPLE_ALBUM, make_album, make_config, make_image, make_station
def test_detects_duplicates(tmp_path, caplog):
make_album(tmp_path / "media" / "foo", EXAMPLE_ALBUM)
make_album(tmp_path / "media" / "bar", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
assert len(provider.browse(str(AlbumsUri()))) == 1
assert len(caplog.records) == 1
assert caplog.records[0].levelno == logging.WARNING
assert caplog.records[0].getMessage() in (
f"Duplicate albums: '{tmp_path}/media/foo' and '{tmp_path}/media/bar'",
f"Duplicate albums: '{tmp_path}/media/bar' and '{tmp_path}/media/foo'",
)
# == root_directory ==
def test_root_directory(tmp_path, caplog):
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.root_directory
assert caplog.text == ""
assert result == Ref.directory(uri="kitchen:root", name="Kitchen media")
# == browse ==
def test_browse_root(tmp_path, caplog):
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.browse("kitchen:root")
assert caplog.text == ""
assert result == [
Ref.directory(uri="kitchen:albums", name="Albums"),
Ref.directory(uri="kitchen:stations", name="Stations"),
]
def test_browse_albums(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "John Doe - One Day"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.browse("kitchen:albums")
assert caplog.text == ""
assert len(result) > 0
assert result[0].type == "album"
assert result == [Ref.album(uri="kitchen:album:95506c273e4ecb0333d19824d66ab586", name="John Doe - One Day")]
def test_browse_stations(tmp_path, caplog):
make_station(tmp_path / "media" / "r1", {"name": "Radio 1", "stream": "http://radio1.com/stream"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.browse("kitchen:stations")
assert caplog.text == ""
assert len(result) > 0
assert result[0].type == "album"
assert result == [Ref.album(uri="kitchen:station:770e06d40b8b4d64e89c24098d25fdc2", name="Radio 1")]
def test_browse_album(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
album_uri = provider.browse("kitchen:albums")[0].uri
result = provider.browse(album_uri)
assert caplog.text == ""
assert len(result) > 0
assert result[0].type == "track"
def test_browse_missing_album(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.browse("kitchen:album:01234567012345670123456701234567")
assert caplog.text == ""
assert result == []
def test_browse_station(tmp_path, caplog):
make_station(tmp_path / "media" / "r1", {"name": "Radio 1", "stream": "http://radio1.com/stream"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
station_uri = provider.browse("kitchen:stations")[0].uri
result = provider.browse(station_uri)
assert caplog.text == ""
assert len(result) > 0
assert result[0].type == "track"
def test_browse_missing_station(tmp_path, caplog):
make_station(tmp_path / "media" / "r1", {"name": "Radio 1", "stream": "http://radio1.com/stream"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.browse("kitchen:station:01234567012345670123456701234567")
assert caplog.text == ""
assert result == []
def test_browse_other(tmp_path, caplog):
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.browse("kitchen:nonsense")
assert result == []
assert len(caplog.records) == 1
assert caplog.records[0].levelno == logging.ERROR
assert caplog.records[0].getMessage() == "Error in browse for kitchen:nonsense: Unsupported URI"
# == lookup ==
def test_lookup_album(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
album_uri = provider.browse("kitchen:albums")[0].uri
result = provider.lookup(album_uri)
assert caplog.text == ""
assert len(result) > 0
assert isinstance(result[0], Track)
def test_lookup_missing_album(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.lookup("kitchen:album:01234567012345670123456701234567")
assert caplog.text == ""
assert result == []
def test_lookup_album_track(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
album_uri = provider.browse("kitchen:albums")[0].uri
result = provider.lookup(album_uri + ":1:1")
assert caplog.text == ""
assert len(result) > 0
assert isinstance(result[0], Track)
def test_lookup_missing_album_track(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
album_uri = provider.browse("kitchen:albums")[0].uri
result = provider.lookup(album_uri + ":9:9")
assert caplog.text == ""
assert result == []
def test_lookup_station(tmp_path, caplog):
make_station(tmp_path / "media" / "r1", {"name": "Radio 1", "stream": "http://radio1.com/stream"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
station_uri = provider.browse("kitchen:stations")[0].uri
result = provider.lookup(station_uri)
assert caplog.text == ""
assert len(result) > 0
assert isinstance(result[0], Track)
def test_lookup_missing_station(tmp_path, caplog):
make_station(tmp_path / "media" / "r1", {"name": "Radio 1", "stream": "http://radio1.com/stream"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.lookup("kitchen:station:01234567012345670123456701234567")
assert caplog.text == ""
assert result == []
def test_lookup_station_stream(tmp_path, caplog):
make_station(tmp_path / "media" / "r1", {"name": "Radio 1", "stream": "http://radio1.com/stream"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
station_uri = provider.browse("kitchen:stations")[0].uri
result = provider.lookup(station_uri + ":1")
assert caplog.text == ""
assert len(result) > 0
assert isinstance(result[0], Track)
def test_lookup_other(tmp_path, caplog):
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.lookup("kitchen:nonsense")
assert result == []
assert len(caplog.records) == 1
assert caplog.records[0].levelno == logging.ERROR
assert caplog.records[0].getMessage() == "Error in lookup for kitchen:nonsense: Unsupported URI"
# == search ==
def test_search_not_found(tmp_path, caplog):
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"any": ["foo"]})
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert result.albums == ()
assert result.tracks == ()
def test_search_match_album(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "test1", "title": "Goodbye Maria"})
make_album(tmp_path / "media" / "a2", {"name": "test2", "title": "Goodbye Marianne"})
make_album(tmp_path / "media" / "a3", {"name": "test3", "title": "Goodbye Marlene"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"album": ["mari"]})
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert {album.name for album in result.albums} == {"Goodbye Maria", "Goodbye Marianne"}
assert result.tracks == ()
def test_search_match_multi_words(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "test1", "title": "Goodbye Maria"})
make_album(tmp_path / "media" / "a2", {"name": "test2", "title": "Goodbye Marianne"})
make_album(tmp_path / "media" / "a3", {"name": "test3", "title": "Good Night Marlene"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"album": ["ni mar"]})
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert [album.name for album in result.albums] == ["Good Night Marlene"]
assert result.tracks == ()
def test_search_match_multi_words_across_different_fields(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "test1", "title": "Goodbye Maria", "artist": "John Doe"})
make_album(tmp_path / "media" / "a2", {"name": "test2", "title": "Goodbye Marianne", "artist": "John Doe"})
make_album(tmp_path / "media" / "a3", {"name": "test3", "title": "Good Night Marlene", "artist": "John Doe"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"any": ["john maria"]})
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert {album.name for album in result.albums} == {"Goodbye Maria", "Goodbye Marianne"}
assert result.tracks == ()
def test_search_match_album_exact(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "test1", "title": "Goodbye Maria"})
make_album(tmp_path / "media" / "a2", {"name": "test2", "title": "Goodbye Marianne"})
make_album(tmp_path / "media" / "a3", {"name": "test3", "title": "Goodbye Marlene"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"album": ["maria"]}, exact=True)
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert [album.name for album in result.albums] == ["Goodbye Maria"]
assert result.tracks == ()
def test_search_match_albumartist(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "test1", "title": "One", "artist": "John Jackson"})
make_album(tmp_path / "media" / "a2", {"name": "test2", "title": "Two", "artist": "Jack Johnson"})
make_album(tmp_path / "media" / "a3", {"name": "test3", "title": "Three", "artist": "James Jameson"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"albumartist": ["jack"]})
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert {album.name for album in result.albums} == {"One", "Two"}
assert result.tracks == ()
def test_search_match_albumartist_exact(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "test1", "title": "One", "artist": "John Jackson"})
make_album(tmp_path / "media" / "a2", {"name": "test2", "title": "Two", "artist": "Jack Johnson"})
make_album(tmp_path / "media" / "a3", {"name": "test3", "title": "Three", "artist": "James Jameson"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"albumartist": ["jack"]}, exact=True)
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert [album.name for album in result.albums] == ["Two"]
assert result.tracks == ()
def test_search_match_trackname(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.search({"track_name": ["morn"]})
assert caplog.text == ""
assert type(result) == SearchResult
assert result.uri == "kitchen:search"
assert result.albums == ()
assert [track.name for track in result.tracks] == ["The Morning"]
# == get_images ==
def test_get_images_empty(tmp_path, caplog):
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
result = provider.get_images([])
assert caplog.text == ""
assert result == {}
def test_get_images_for_album_without_image(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
album_uri = provider.browse(str(AlbumsUri()))[0].uri
result = provider.get_images([album_uri])
assert caplog.text == ""
assert result == {}
def test_get_images_for_album_with_image(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
make_image(tmp_path / "media" / "a1" / "cover.jpg")
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
album_uri = provider.browse(str(AlbumsUri()))[0].uri
album_id = parse_uri(album_uri).album_id
result = provider.get_images([album_uri])
assert caplog.text == ""
assert result == {album_uri: [Image(uri=f"/kitchen/albums/{album_id}/cover.jpg")]}
def test_get_images_for_track_with_image(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
make_image(tmp_path / "media" / "a1" / "cover.jpg")
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
track_uri = provider.browse(str(AlbumsUri()))[0].uri + ":1:1"
album_id = parse_uri(track_uri).album_id
result = provider.get_images([track_uri])
assert caplog.text == ""
assert result == {track_uri: [Image(uri=f"/kitchen/albums/{album_id}/cover.jpg")]}
def test_get_images_for_multiple_uris(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", EXAMPLE_ALBUM)
make_image(tmp_path / "media" / "a1" / "cover.jpg")
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
track1_uri = provider.browse(str(AlbumsUri()))[0].uri + ":1:1"
track2_uri = provider.browse(str(AlbumsUri()))[0].uri + ":1:2"
album_id = parse_uri(track1_uri).album_id
result = provider.get_images([track1_uri, track2_uri])
assert caplog.text == ""
assert result == {
track1_uri: [Image(uri=f"/kitchen/albums/{album_id}/cover.jpg")],
track2_uri: [Image(uri=f"/kitchen/albums/{album_id}/cover.jpg")],
}
# == get_playback_uri ==
def test_get_playback_uri_album_track(tmp_path, caplog):
make_album(tmp_path / "media" / "a1", {"name": "John Doe - One Day", "tracks": [{"path": "01.ogg"}]})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
track_uri = provider.browse("kitchen:albums")[0].uri + ":1:1"
result = provider.get_playback_uri(track_uri)
assert caplog.text == ""
assert result == f"file://{tmp_path}/media/a1/01.ogg"
def test_get_playback_uri_station_stream(tmp_path, caplog):
make_station(tmp_path / "media" / "r1", {"name": "Radio 1", "stream": "http://radio1.com/stream"})
provider = KitchenLibraryProvider(backend={}, config=make_config(tmp_path))
stream_uri = provider.browse("kitchen:stations")[0].uri + ":1"
result = provider.get_playback_uri(stream_uri)
assert caplog.text == ""
assert result == "http://radio1.com/stream"
def join_artists(album: Album):
return ",".join(artist.name for artist in album.artists)
| 36.486239
| 113
| 0.68299
|
ea1bbac86aba4ed28eb909d17edc913f4d3a277c
| 173
|
py
|
Python
|
en/086/python/main.py
|
franciscogomes2020/exercises
|
8b33c4b9349a9331e4002a8225adc2a482c70024
|
[
"MIT"
] | null | null | null |
en/086/python/main.py
|
franciscogomes2020/exercises
|
8b33c4b9349a9331e4002a8225adc2a482c70024
|
[
"MIT"
] | null | null | null |
en/086/python/main.py
|
franciscogomes2020/exercises
|
8b33c4b9349a9331e4002a8225adc2a482c70024
|
[
"MIT"
] | null | null | null |
# Create a program that declares a 3x3 matrix and fills it with values read from the keyboard. At the end, display the matrix on the screen with proper formatting.
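# --- Hedged solution sketch (not the original author's code) ---
# One straightforward way to solve the exercise above: read nine values from
# the keyboard (assumed here to be integers) and print the matrix with aligned
# columns.
matrix = []
for row in range(3):
    line = []
    for col in range(3):
        value = int(input(f'Value for position [{row}][{col}]: '))
        line.append(value)
    matrix.append(line)
print('-' * 30)
for line in matrix:
    print(' '.join(f'[{value:^5}]' for value in line))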
| 86.5
| 172
| 0.797688
|
22a55b22c853b9f7b9c4f89f5f35487285dc17cf
| 898
|
py
|
Python
|
mkt/submit/migrations/0001_initial.py
|
Witia1/zamboni
|
b1e2d5d475abff2fa5d4990415a06adee33bd647
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/submit/migrations/0001_initial.py
|
Witia1/zamboni
|
b1e2d5d475abff2fa5d4990415a06adee33bd647
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/submit/migrations/0001_initial.py
|
Witia1/zamboni
|
b1e2d5d475abff2fa5d4990415a06adee33bd647
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='AppSubmissionChecklist',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('terms', models.BooleanField(default=False)),
('manifest', models.BooleanField(default=False)),
('details', models.BooleanField(default=False)),
],
options={
'db_table': 'submission_checklist_apps',
},
bases=(models.Model,),
),
]
| 30.965517
| 114
| 0.569042
|
c921cc5f4a2f76250677bea3ae1d00326dc30b98
| 2,248
|
py
|
Python
|
xlsxwriter/test/app/test_app02.py
|
edparcell/XlsxWriter
|
d6a5df232ac0091017ae5c65f592bcc776d296ea
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2019-01-09T19:43:43.000Z
|
2019-01-09T19:43:43.000Z
|
xlsxwriter/test/app/test_app02.py
|
edparcell/XlsxWriter
|
d6a5df232ac0091017ae5c65f592bcc776d296ea
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/app/test_app02.py
|
edparcell/XlsxWriter
|
d6a5df232ac0091017ae5c65f592bcc776d296ea
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...app import App
class TestAssembleApp(unittest.TestCase):
"""
Test assembling a complete App file.
"""
def test_assemble_xml_file(self):
"""Test writing an App file."""
self.maxDiff = None
fh = StringIO()
app = App()
app._set_filehandle(fh)
app._add_part_name('Sheet1')
app._add_part_name('Sheet2')
app._add_heading_pair(('Worksheets', 2))
app._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
<Application>Microsoft Excel</Application>
<DocSecurity>0</DocSecurity>
<ScaleCrop>false</ScaleCrop>
<HeadingPairs>
<vt:vector size="2" baseType="variant">
<vt:variant>
<vt:lpstr>Worksheets</vt:lpstr>
</vt:variant>
<vt:variant>
<vt:i4>2</vt:i4>
</vt:variant>
</vt:vector>
</HeadingPairs>
<TitlesOfParts>
<vt:vector size="2" baseType="lpstr">
<vt:lpstr>Sheet1</vt:lpstr>
<vt:lpstr>Sheet2</vt:lpstr>
</vt:vector>
</TitlesOfParts>
<Company>
</Company>
<LinksUpToDate>false</LinksUpToDate>
<SharedDoc>false</SharedDoc>
<HyperlinksChanged>false</HyperlinksChanged>
<AppVersion>12.0000</AppVersion>
</Properties>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| 33.552239
| 190
| 0.5
|
227e5a695a7153d1287ad8614f986765bfd6ca7e
| 24,196
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/monitor/tests/latest/test_monitor_autoscale.py
|
akashsinghal/azure-cli
|
8ab2f7604a834de790bdea849b3e83f2466428b9
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/monitor/tests/latest/test_monitor_autoscale.py
|
akashsinghal/azure-cli
|
8ab2f7604a834de790bdea849b3e83f2466428b9
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/monitor/tests/latest/test_monitor_autoscale.py
|
akashsinghal/azure-cli
|
8ab2f7604a834de790bdea849b3e83f2466428b9
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import LiveScenarioTest, ScenarioTest, ResourceGroupPreparer, record_only
from knack.util import CLIError
class TestMonitorAutoscaleScenario(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale')
def test_monitor_autoscale_basic(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --count 3', checks=[
self.check('profiles[0].capacity.default', 3),
self.check('profiles[0].capacity.minimum', 3),
self.check('profiles[0].capacity.maximum', 3)
])
self.cmd('monitor autoscale list -g {rg}',
checks=self.check('length(@)', 1))
self.cmd('monitor autoscale show -g {rg} -n {vmss}')
# verify that count behaves correctly
self.cmd('monitor autoscale update -g {rg} -n {vmss} --count 2', checks=[
self.check('profiles[0].capacity.default', 2),
self.check('profiles[0].capacity.minimum', 2),
self.check('profiles[0].capacity.maximum', 2)
])
self.cmd('monitor autoscale update -g {rg} -n {vmss} --min-count 1 --count 2 --max-count 4', checks=[
self.check('profiles[0].capacity.default', 2),
self.check('profiles[0].capacity.minimum', 1),
self.check('profiles[0].capacity.maximum', 4)
])
self.cmd('monitor autoscale update -g {rg} -n {vmss} --max-count 5', checks=[
self.check('profiles[0].capacity.default', 2),
self.check('profiles[0].capacity.minimum', 1),
self.check('profiles[0].capacity.maximum', 5)
])
self.cmd('monitor autoscale delete -g {rg} -n {vmss}')
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_rules')
def test_monitor_autoscale_rules(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --min-count 1 --count 3 --max-count 5')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}')
self.cmd('monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "Percentage CPU > 75 avg 5m" --scale to 5', checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd('monitor autoscale rule create -g {rg} --autoscale-name {vmss} --timegrain "avg 5m" --condition "Percentage CPU < 30 avg 10m" --scale in 50% --cooldown 10', checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'LessThan'),
self.check('metricTrigger.threshold', 30),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT10M'),
self.check('metricTrigger.timeGrain', 'PT5M'),
self.check('scaleAction.cooldown', 'PT10M'),
self.check('scaleAction.direction', 'Decrease'),
self.check('scaleAction.type', 'PercentChangeCount'),
self.check('scaleAction.value', '50')
])
self.cmd('monitor autoscale rule create -g {rg} --autoscale-name {vmss} --timegrain "min 1m" --condition "Percentage CPU < 10 avg 5m" --scale to 1', checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'LessThan'),
self.check('metricTrigger.threshold', 10),
self.check('metricTrigger.statistic', 'Min'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '1')
])
# verify order is stable
list_1 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
        with self.assertRaisesRegex(CLIError, 'Please double check the name of the autoscale profile.'):
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name falseprofile')
list_2 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
self.assertTrue(len(list_1) == 3 and len(list_2) == 3)
for x in range(len(list_1)):
self.assertTrue(list_1[x] == list_2[x])
# verify copy works
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n test2 --start 2018-03-01 --end 2018-04-01 --min-count 1 --count 3 --max-count 5 --timezone "Pacific Standard Time"')
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n test3 --start 2018-05-01 --end 2018-06-01 --min-count 1 --count 2 --max-count 5 --timezone "Pacific Standard Time"')
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n test1 --start 2018-01-01 --end 2018-02-01 --min-count 1 --count 2 --max-count 5 --timezone "Pacific Standard Time" --copy-rules default')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test1',
checks=self.check('length(@)', 3))
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test2',
checks=self.check('length(@)', 0))
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test3',
checks=self.check('length(@)', 0))
self.cmd('monitor autoscale rule copy -g {rg} --autoscale-name {vmss} --source-schedule test1 --dest-schedule test2 --index "*"')
self.cmd('monitor autoscale rule copy -g {rg} --autoscale-name {vmss} --source-schedule test2 --dest-schedule test3 --index 0')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test2',
checks=self.check('length(@)', 3))
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test3',
checks=self.check('length(@)', 1))
# verify rule removal by index and remove all works
self.cmd('monitor autoscale rule delete -g {rg} --autoscale-name {vmss} --index 2')
list_3 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
self.assertTrue(len(list_3) == 2)
self.cmd('monitor autoscale rule delete -g {rg} --autoscale-name {vmss} --index "*"')
list_4 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
self.assertTrue(len(list_4) == 0)
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_rule_with_dimensions')
def test_monitor_autoscale_rule_with_dimensions(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd(
'vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$ --instance-count 2')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --min-count 1 --count 3 --max-count 5')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}')
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "\'Mynamespace.abcd\' Percentage CPU > 75 avg 5m where VMName == cliname1 or cliname2" --scale to 5',
checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'VMName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'cliname1'),
self.check('metricTrigger.dimensions[0].values[1]', 'cliname2'),
self.check('metricTrigger.metricNamespace', 'Mynamespace.abcd'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "\'Mynamespace.abcd\' Percentage CPU > 75 avg 5m where VMName == cliname1 or cliname2" --scale to 5',
checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'VMName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'cliname1'),
self.check('metricTrigger.dimensions[0].values[1]', 'cliname2'),
self.check('metricTrigger.metricNamespace', 'Mynamespace.abcd'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "\'Mynamespace.abcd\' Percentage CPU > 75 avg 5m where VMName == cliname1 or cliname2" --scale to 5',
checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'VMName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'cliname1'),
self.check('metricTrigger.dimensions[0].values[1]', 'cliname2'),
self.check('metricTrigger.metricNamespace', 'Mynamespace.abcd'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}', checks=[
self.check('length(@)', 3)
])
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_fixed')
def test_monitor_autoscale_fixed(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'sched': 'Christmas'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --count 3')
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n {sched} --start 2018-12-24 --end 2018-12-26 --count 5 --timezone "Pacific Standard Time"', checks=[
self.check('capacity.default', 5),
self.check('capacity.minimum', 5),
self.check('capacity.maximum', 5),
self.check('fixedDate.end', '2018-12-26T00:00:00+00:00'),
self.check('fixedDate.start', '2018-12-24T00:00:00+00:00'),
self.check('fixedDate.timeZone', 'Pacific Standard Time'),
self.check('recurrence', None)
])
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 2))
self.cmd('monitor autoscale profile delete -g {rg} --autoscale-name {vmss} -n {sched}')
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 1))
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_recurring')
def test_monitor_autoscale_recurring(self, resource_group):
import json
import time
sleep_time = 3
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testname --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --count 3')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n weekend --recurrence week sat sun --count 1 --timezone "Pacific Standard Time"')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n weekday --recurrence week mo tu we th fr --count 4 --timezone "Pacific Standard Time"')
time.sleep(sleep_time)
# 2 profiles + 2 "default" profiles + default "default" profile
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 5))
# should update all "default" profiles
value = 4
self.cmd('monitor autoscale update -g {{rg}} -n {{vmss}} --count {}'.format(value))
time.sleep(sleep_time)
schedules = self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}').get_output_in_json()
def _is_default(val):
if not val['fixedDate'] and not val['recurrence']:
return True
try:
json.loads(val['name'])
return True
except ValueError:
return False
for schedule in [x for x in schedules if _is_default(x)]:
self.assertTrue(int(schedule['capacity']['default']) == value)
self.assertTrue(int(schedule['capacity']['minimum']) == value)
self.assertTrue(int(schedule['capacity']['maximum']) == value)
# should delete the weekend profile and its matching default
self.cmd('monitor autoscale profile delete -g {rg} --autoscale-name {vmss} -n weekend')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 3))
# should delete the weekday profile and its matching default
self.cmd('monitor autoscale profile delete -g {rg} --autoscale-name {vmss} -n weekday')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 1))
# inexplicably fails on CI so making into a live test
class TestMonitorAutoscaleTimezones(LiveScenarioTest):
def test_monitor_autoscale_timezones(self):
self.cmd('monitor autoscale profile list-timezones',
checks=self.check('length(@)', 136))
self.cmd('monitor autoscale profile list-timezones -q pacific',
checks=self.check('length(@)', 6))
self.cmd('monitor autoscale profile list-timezones --offset +12',
checks=self.check('length(@)', 6))
self.cmd('monitor autoscale profile list-timezones -q pacific --offset -4',
checks=self.check('length(@)', 1))
class TestMonitorAutoscaleComplexRules(ScenarioTest):
def setUp(self):
super(TestMonitorAutoscaleComplexRules, self).setUp()
self.cmd('extension add -n spring-cloud')
def tearDown(self):
self.cmd('extension remove -n spring-cloud')
super(TestMonitorAutoscaleComplexRules, self).tearDown()
@record_only()
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_rule_for_spring_cloud', location='westus2')
def test_monitor_autoscale_rule_for_spring_cloud(self, resource_group):
self.kwargs.update({
'sc': 'clitest',
'scapp': 'app1',
'deployment': 'default',
'rg': 'clitest',
'deployment_id': '/subscriptions/6c933f90-8115-4392-90f2-7077c9fa5dbd/resourceGroups/clitest/providers/Microsoft.AppPlatform/Spring/clitest/apps/app1/deployments/default'
})
# self.cmd(
# 'spring-cloud create -g {rg} -n {sc}')
# self.kwargs['app_id'] = self.cmd('spring-cloud app create -g {rg} -s {sc} -n {scapp}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {deployment_id} --min-count 1 --count 1 --max-count 3')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {sc}')
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {sc} --condition "tomcat.global.request.total.count > 0 avg 3m where AppName == app1 and Deployment == default" --scale out 1',
checks=[
self.check('metricTrigger.metricName', 'tomcat.global.request.total.count'),
self.check('metricTrigger.metricNamespace', 'Microsoft.AppPlatform/Spring'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 0),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT3M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'AppName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'app1'),
self.check('metricTrigger.dimensions[1].dimensionName', 'Deployment'),
self.check('metricTrigger.dimensions[1].operator', 'Equals'),
self.check('metricTrigger.dimensions[1].values[0]', 'default'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'Increase'),
self.check('scaleAction.type', 'ChangeCount'),
self.check('scaleAction.value', '1')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {sc} --condition "tomcat.global.request.total.count > 0 avg 3m where AppName == app1 and Deployment == default" --scale out 1',
checks=[
self.check('metricTrigger.metricName', 'tomcat.global.request.total.count'),
self.check('metricTrigger.metricNamespace', 'Microsoft.AppPlatform/Spring'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 0),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT3M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'AppName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'app1'),
self.check('metricTrigger.dimensions[1].dimensionName', 'Deployment'),
self.check('metricTrigger.dimensions[1].operator', 'Equals'),
self.check('metricTrigger.dimensions[1].values[0]', 'default'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'Increase'),
self.check('scaleAction.type', 'ChangeCount'),
self.check('scaleAction.value', '1')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {sc} --condition "tomcat.global.request.total.count > 0 avg 3m where AppName == app1 and Deployment == default" --scale out 1',
checks=[
self.check('metricTrigger.metricName', 'tomcat.global.request.total.count'),
self.check('metricTrigger.metricNamespace', 'Microsoft.AppPlatform/Spring'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 0),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT3M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'AppName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'app1'),
self.check('metricTrigger.dimensions[1].dimensionName', 'Deployment'),
self.check('metricTrigger.dimensions[1].operator', 'Equals'),
self.check('metricTrigger.dimensions[1].values[0]', 'default'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'Increase'),
self.check('scaleAction.type', 'ChangeCount'),
self.check('scaleAction.value', '1')
])
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {sc}', checks=[
self.check('length(@)', 3)
])
| 57.885167
| 223
| 0.60634
|
944fb674001b1d6e29a65f7a89686aa664032a05
| 1,956
|
py
|
Python
|
code/conll2tsv.py
|
ltgoslo/nor_dia_change
|
03fae418fec378946a81da4b373e6a581be2d33b
|
[
"CC-BY-4.0"
] | null | null | null |
code/conll2tsv.py
|
ltgoslo/nor_dia_change
|
03fae418fec378946a81da4b373e6a581be2d33b
|
[
"CC-BY-4.0"
] | 1
|
2022-03-29T00:14:22.000Z
|
2022-03-29T00:14:22.000Z
|
code/conll2tsv.py
|
ltgoslo/nor_dia_change
|
03fae418fec378946a81da4b373e6a581be2d33b
|
[
"CC-BY-4.0"
] | 1
|
2022-03-25T15:19:16.000Z
|
2022-03-25T15:19:16.000Z
|
#! python3
# coding: utf-8
import sys
from smart_open import open
from helpers import extract_proper, check_word, num_replace
import csv
SOURCE_FILE = sys.argv[1] # Must be *.conllu.gz
TEMPFILE0_NAME = SOURCE_FILE.replace('.conllu', '.txt')
processed = extract_proper(SOURCE_FILE, TEMPFILE0_NAME) # Can turn off sentence breaks
print('Processed %d lines' % processed, file=sys.stderr)
print('Filtering the corpus...', file=sys.stderr)
functional = set('ADP AUX CCONJ DET PART PRON SCONJ PUNCT'.split())
SKIP_1_WORD = True
corpus_file = open(TEMPFILE0_NAME, 'r')
FILTERED_CORPUS_FILE_NAME = TEMPFILE0_NAME.replace('.txt', '_contexts.tsv')
filtered = open(FILTERED_CORPUS_FILE_NAME, 'a')
filtered.write('ID,LEMMAS,RAW\n')
counter = 0
error_counter = 0
rawwriter = csv.writer(filtered, delimiter=',', quotechar='"', dialect='unix')
for line in corpus_file:
res = line.strip().split('\t')
try:
(tagged, raw) = res
except ValueError:
error_counter += 1
continue
good = []
for w in tagged.split():
try:
(token, pos) = w.split('_')
except:
token = w
pos = "UNK"
print(line, file=sys.stderr)
checked_word = check_word(token, pos, nofunc=functional) # Can feed stopwords list
if not checked_word:
continue
if pos == 'NUM' and token.isdigit(): # Replacing numbers with xxxxx of the same length
checked_word = num_replace(checked_word)
good.append(checked_word)
if SKIP_1_WORD: # May be, you want to filter out one-word sentences
if len(good) < 2:
continue
new_tagged = ' '.join(good)
identifier = str(counter)
rawwriter.writerow([identifier, new_tagged, raw])
counter += 1
corpus_file.close()
filtered.close()
print('Erroneous lines:', error_counter, file=sys.stderr)
print('Final training corpus:', FILTERED_CORPUS_FILE_NAME, file=sys.stderr)
| 30.092308
| 95
| 0.666667
|
ba8443a9f4d83336d760b38d0254fd41110eb089
| 572
|
py
|
Python
|
robot/pmm/resources/locators_50.py
|
sebastianocostanzo/PMM
|
3994736c4bfe1c8ba5f1abf2c0daf948dbb8187c
|
[
"BSD-3-Clause"
] | null | null | null |
robot/pmm/resources/locators_50.py
|
sebastianocostanzo/PMM
|
3994736c4bfe1c8ba5f1abf2c0daf948dbb8187c
|
[
"BSD-3-Clause"
] | null | null | null |
robot/pmm/resources/locators_50.py
|
sebastianocostanzo/PMM
|
3994736c4bfe1c8ba5f1abf2c0daf948dbb8187c
|
[
"BSD-3-Clause"
] | null | null | null |
""" Locators for summer'21 """
from locators_51 import pmm_lex_locators
import copy
pmm_lex_locators = copy.deepcopy(pmm_lex_locators)
pmm_lex_locators[
"contact_save"
] = "//button[contains(@class,'slds-button--neutral') and (text() or @title='Save')]"
pmm_lex_locators["related"][
"button"
] = "//article[contains(@class, 'slds-card slds-card_boundary')][.//span[@title='{}']]//a[@title='{}']"
pmm_lex_locators["new_record"][
"checkbox"
] = "//div[contains(@class,'uiInputCheckbox')]/label/span[text()='{}']/../following-sibling::input[@type='checkbox']"
| 33.647059
| 117
| 0.685315
|
d3295d6b17a6de9f890b025bed7fdbe3fc651699
| 5,193
|
py
|
Python
|
kubernetes/client/api/networking_api.py
|
henrywu2019/python
|
fb7214144395c05349e70a58ea129576f6b11fc4
|
[
"Apache-2.0"
] | 4,417
|
2018-01-13T04:30:48.000Z
|
2022-03-31T15:33:59.000Z
|
kubernetes/client/api/networking_api.py
|
henrywu2019/python
|
fb7214144395c05349e70a58ea129576f6b11fc4
|
[
"Apache-2.0"
] | 1,414
|
2018-01-12T19:31:56.000Z
|
2022-03-31T22:01:02.000Z
|
kubernetes/client/api/networking_api.py
|
henrywu2019/python
|
fb7214144395c05349e70a58ea129576f6b11fc4
|
[
"Apache-2.0"
] | 2,854
|
2018-01-14T08:57:33.000Z
|
2022-03-31T01:41:56.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.21
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class NetworkingApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without the HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
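# Minimal usage sketch (not part of the generated client; assumes a reachable cluster
# and a local kubeconfig):
#
#     from kubernetes import client, config
#     config.load_kube_config()
#     group = client.NetworkingApi().get_api_group()              # returns a V1APIGroup
#     thread = client.NetworkingApi().get_api_group(async_req=True)
#     group = thread.get()                                        # asynchronous variant from the docstring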
| 36.314685
| 124
| 0.596187
|
7153b5c6056cb021abb75848ba1c5e455078622a
| 52
|
py
|
Python
|
problem_3/__init__.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
problem_3/__init__.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
problem_3/__init__.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
from .problem_3 import Node, serialize, deserialize
| 26
| 51
| 0.826923
|
07f361d8f00fd2d45bd2ab910a1e86c89104242a
| 4,546
|
py
|
Python
|
bin/statistics.py
|
kderme/spark-memory-simulator
|
b9fe69ea29175cb1452b436c4e898f335e05c47c
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null |
bin/statistics.py
|
kderme/spark-memory-simulator
|
b9fe69ea29175cb1452b436c4e898f335e05c47c
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null |
bin/statistics.py
|
kderme/spark-memory-simulator
|
b9fe69ea29175cb1452b436c4e898f335e05c47c
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import sys
import os
import json
import plotly.plotly as py
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import ntpath
def keep (line):
flag1 = '|| SIMULATION ||' in line
flag2 = '&&' not in line
flag3 = '|| SIMULATION || Predicting..' not in line
return flag1 and flag2 and flag3
def find_last_job(data):
job = -1
for line in data:
if '|| SIMULATION || jobid = ' in line:
job = int(line.split('=')[1])
return job
def toJson (file):
data = [line.strip() for line in open(file, 'r')]
newdata = []
for line in data:
if keep(line):
print line
line1 = line.split('||')[2]
if line1.startswith(' }'):
newdata.append(' "valid" : true')
line1 = ' },'
if 'No results. Simulation failed' in line1:
newdata.append(' "valid" : false')
line1 = ' },'
if '=' in line1:
line1 = ' "' + line1.split('=')[0].strip()+ '" : "' + line1.split('=')[1].strip() +'",'
newdata.append(line1)
print newdata
outfile = open(file + '1.json', 'w')
print>>outfile, '['
for line in newdata[:-1]:
print>>outfile, line
print>>outfile, '}]'
return file + '1.json'
spark_home = os.environ.get('SPARK_HOME')
file = sys.argv[1]
js = toJson (file)
dic = json.load(open(js))
data = [line.strip() for line in open(file, 'r')]
last_job = find_last_job(data)
print dic[0]["valid"]
print range(len(dic))
dic2 = []
for d in dic:
print d
if bool(d.get("jobid")) and d["jobid"] == str(last_job) and bool(d.get("valid") == True):
dic2.append(d)
open(file + ".json", "w").write(
json.dumps(dic2, sort_keys=True, indent=4, separators=(',', ': '))
)
dict = {}
dict[file]={}
sizes = []
for i in range(len(dic2)):
trial = dic2[i]
policy = trial['policy']
hits = int(trial['hits'])
misses = int(trial['misses'])
narrows = int(trial['narrowDependencies'])
shuffles = int(trial['shuffleDpendencies'])
size = int(trial['memory capacity'])
if not size in sizes:
sizes.append(size)
if (not bool(dict[file].get(policy))):
dict[file][policy]={}
dict[file][policy]['hits']=[]
dict[file][policy]['misses']=[]
dict[file][policy]['narrows']=[]
dict[file][policy]['shuffles']=[]
dict[file][policy]['hits'].append(hits)
dict[file][policy]['misses'].append(misses)
dict[file][policy]['narrows'].append(narrows)
dict[file][policy]['shuffles'].append(shuffles)
print dict
print sizes
policies = dict[file].keys
perf = ['hits', 'misses','narrows','shuffles']
for policy in ['LRU', 'LFU', 'FIFO', 'LRC', 'Belady']:
for metr in perf:
x1 = sizes
y1 = dict[file][policy][metr]
print x1
print y1
# data = [go.Bar(x=x1, y=y1)]
name = ntpath.basename(file)
output = '/home/kostas/Repos/spark-logging-dag/diagrams/4/' + name + '_' + policy + '_' + metr
plt.plot(x1, y1)
plt.xlabel('Cache Size')
plt.ylabel(metr)
plt.savefig(output)
plt.clf()
sys.exit()
sys.exit()
## for size in SIZES:
## pathfile = path + '/' + str(size) + '/' + lib + '/' + file
## print pathfile
data = [line.strip() for line in open(pathfile, 'r')]
last_job = find_last_job(data)
while True:
refound_last = False
for line in data:
if (not refound_last):
if '|| SIMULATION || jobid = ' in line:
job = int(line.split('=')[1])
if (job == last_job):
refound_last = True
else:
if '|| SIMULATION || policy = ' in line:
policy = line.split('= ')[1]
if '|| SIMULATION || memory capacity' in line:
size = int(line.split('=')[1])
if (bool(dict[file].get(policy))):
dict[file][policy]={}
dict[file][policy]['hits']=[]
dict[file][policy]['misses']=[]
dict[file][policy]['narrows']=[]
dict[file][policy]['shuffles']=[]
if '|| SIMULATION || hits' in line:
hits = int(line.split('=')[1])
dict[file][policy]['hits'].append(hits)
if '|| SIMULATION || misses = ' in line:
misses = int(line.split('=')[1])
dict[file][policy]['misses'].append(misses)
if '|| SIMULATION || diskHits = ' in line:
diskHits = int(line.split('=')[1])
if '|| SIMULATION || narrowDependencies = ' in line:
narrows = int(line.split('=')[1])
dict[file][policy]['narrows'].append(narrows)
if '|| SIMULATION || shuffleDpendencies = ' in line:
shuffles = int(line.split('=')[1])
dict[file][policy]['shuffles'].append(shuffles)
print dict
| 29.519481
| 98
| 0.577651
|
89c56066d714a6b3ac332beb9a10c725d60cd939
| 603
|
py
|
Python
|
L1Trigger/L1TMuonBarrel/test/kalmanTools/makePropToVertexLUTs.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
L1Trigger/L1TMuonBarrel/test/kalmanTools/makePropToVertexLUTs.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
L1Trigger/L1TMuonBarrel/test/kalmanTools/makePropToVertexLUTs.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from L1Trigger.L1TMuonBarrel.simKBmtfDigis_cfi import bmtfKalmanTrackingSettings as settings
eLoss = settings.eLoss[0]
alpha = settings.aPhiB[0]
alpha2 = settings.aPhiBNLO[0]
dxy=[]
deltaK=[]
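# The loop below tabulates, for every address k in [0, 2048), the absolute integer values of
#   deltaK(k) = 2*k - 2*k / (1 + eLoss*k)        and        dxy(k) = aPhiB*k / (1 + aPhiBNLO*k)
# using eLoss, aPhiB and aPhiBNLO from bmtfKalmanTrackingSettings above; the two print
# statements emit them as C++ array initializers for the propagation-to-vertex LUTs.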
for addr in range(0,2048):
Knew=addr*2-int(2*addr/(1+eLoss*addr))
deltaK.append(str(abs(Knew)))
d = int(alpha*addr/(1+alpha2*addr))
dxy.append(str(abs(d)))
print('ap_ufixed<12,12> eLossVertex[2048] = {'+','.join(deltaK)+'};')
print('ap_ufixed<12,12> dxyVertex[2048] = {'+','.join(dxy)+'};')
| 18.272727
| 92
| 0.684909
|
b010eb6d601402bec831742d8bd9f98e6fa9a2dd
| 3,529
|
py
|
Python
|
tests/test_linkedlist.py
|
TomCallR/py_recursive
|
282437b29f783eff7575056bf8522ebf03efca32
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_linkedlist.py
|
TomCallR/py_recursive
|
282437b29f783eff7575056bf8522ebf03efca32
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_linkedlist.py
|
TomCallR/py_recursive
|
282437b29f783eff7575056bf8522ebf03efca32
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from lib.linkedlist import LinkedList, Cons, Empty, LList, Union_LList, identity
def build_data():
nonelist = None
new123: LinkedList[int] = Cons(1, Cons(2, Cons(3, Empty())))
return nonelist, new123
class TestLinkedList(unittest.TestCase):
#
def test_sum_int_fold(self):
nonelist, new123 = build_data()
self.assertEqual(new123.fold(lambda x,y: x+y, lambda x: x, 0), 6)
#
def test_sum_int_foldback(self):
fNode = lambda x,y: x+y
fLeaf = lambda: 0
fId = lambda x: x
nonelist, new123 = build_data()
self.assertEqual(new123.foldback(fNode, fLeaf, fId), 6)
#
def test_convert_to_list_foldback(self):
fLeaf = lambda: []
fNode = lambda x,y : [x] + y
fId = lambda x: x
nonelist, new123 = build_data()
self.assertEqual(new123.foldback(fNode, fLeaf, fId), [1, 2, 3])
#
def test_map_foldback(self):
nonelist, new123 = build_data()
expected: LinkedList[int] = Cons(11, Cons(12, Cons(13, Empty())))
self.assertEqual(new123.map_foldback(lambda x: x+10), expected)
#
def test_filter_foldback(self):
nonelist, new123 = build_data()
expected: LinkedList[int] = Cons(1, Cons(3, Empty()))
self.assertEqual(new123.filter_foldback(lambda x: x%2 == 1), expected)
#
def test_rev_fold(self):
nonelist, new123 = build_data()
actual = new123.rev_fold()
expected: LinkedList[int] = Cons(3, Cons(2, Cons(1, Empty())))
self.assertEqual(actual, expected)
class TestLList(unittest.TestCase):
#
def test_tostring_cata(self):
        # problem: the test succeeds, but it does not enforce type homogeneity
list123: LList = (1, (2, ("3", None)))
expected = "(1, (2, (3, None)))"
def fCons(value, next):
return f"({value}, {next})"
def fEmpty():
return "None"
self.assertEqual(Union_LList.cata(fCons, fEmpty, list123), expected)
#
def test_sum_int_fold(self):
list123: LList = (1, (2, (3, None)))
def fCons(acc, value):
return acc + value
def fEmpty(acc):
return acc
self.assertEqual(Union_LList.fold(fCons, fEmpty, 0, list123), 6)
#
def test_sum_int_foldback(self):
list123: LList = (1, (2, (3, None)))
def fCons(value, x):
return value + x
def fEmpty():
return 0
self.assertEqual(Union_LList.foldback(fCons, fEmpty, list123, identity), 6)
#
def test_convert_to_list_foldback(self):
list123: LList = (1, (2, (3, None)))
expected = [1, 2, 3]
def fCons(value, x):
return [value] + x
def fEmpty():
return []
self.assertEqual(Union_LList.foldback(fCons, fEmpty, list123, identity), expected)
#
def test_map_foldback(self):
list123: LList = (1, (2, (3, None)))
expected: LList = (11, (12, (13, None)))
self.assertEqual(Union_LList.map_foldback(list123, lambda x: x+10), expected)
#
def test_filter_foldback(self):
list123: LList = (1, (2, (3, None)))
expected: LList = (1, (3, None))
self.assertEqual(Union_LList.filter_foldback(list123, lambda x: x%2 == 1), expected)
#
def test_rev_fold(self):
list123: LList = (1, (2, (3, None)))
expected: LList = (3, (2, (1, None)))
self.assertEqual(Union_LList.rev_fold(list123), expected)
| 36.760417
| 92
| 0.585718
|
e110d72ab75a9c1c8553753034a72c0aa9b2a84f
| 264
|
py
|
Python
|
iiab/extensions.py
|
georgejhunt/internet-in-a-box
|
67f547a43e87b637723f8b5207653fd7f7887974
|
[
"BSD-2-Clause"
] | 37
|
2016-11-18T21:17:29.000Z
|
2021-12-02T11:48:56.000Z
|
iiab/extensions.py
|
georgejhunt/internet-in-a-box
|
67f547a43e87b637723f8b5207653fd7f7887974
|
[
"BSD-2-Clause"
] | 5
|
2015-01-11T18:32:13.000Z
|
2016-06-11T23:10:16.000Z
|
iiab/extensions.py
|
georgejhunt/internet-in-a-box
|
67f547a43e87b637723f8b5207653fd7f7887974
|
[
"BSD-2-Clause"
] | 10
|
2017-06-17T14:46:47.000Z
|
2022-01-23T21:59:59.000Z
|
# -*- coding: utf-8 -*-
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
#from flask.ext.mail import Mail
#mail = Mail()
#from flask.ext.cache import Cache
#cache = Cache()
#from flask.ext.login import LoginManager
#login_manager = LoginManager()
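# Minimal sketch of how these module-level extensions get bound to the application later,
# assuming the usual app-factory pattern (the factory itself lives elsewhere in the package;
# the database URI below is only an illustrative placeholder):
#
#     from flask import Flask
#     from iiab.extensions import db
#
#     app = Flask(__name__)
#     app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/iiab.db'
#     db.init_app(app)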
| 18.857143
| 43
| 0.727273
|
148386d18babc9ced9b41709a5a9da74f6efea2f
| 1,901
|
py
|
Python
|
holoviews/plotting/__init__.py
|
zbarry/holoviews
|
25505a4470c56a0d62ba5813c1fe9af59f568bcf
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/plotting/__init__.py
|
zbarry/holoviews
|
25505a4470c56a0d62ba5813c1fe9af59f568bcf
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/plotting/__init__.py
|
zbarry/holoviews
|
25505a4470c56a0d62ba5813c1fe9af59f568bcf
|
[
"BSD-3-Clause"
] | null | null | null |
"""
HoloViews plotting sub-system that defines the interface to be used by
any third-party plotting/rendering package.
This file defines the HTML tags used to wrap rendered output for
display in the IPython Notebook (optional).
"""
from ..core.options import Cycle, Compositor
from ..element import Area, Polygons
from ..element.sankey import _layout_sankey, Sankey
from .plot import Plot
from .renderer import Renderer, HTML_TAGS # noqa (API import)
from .util import list_cmaps # noqa (API import)
from ..operation.stats import univariate_kde, bivariate_kde
Compositor.register(Compositor("Distribution", univariate_kde, None,
'data', transfer_options=True,
transfer_parameters=True,
output_type=Area,
backends=['bokeh', 'matplotlib']))
Compositor.register(Compositor("Bivariate", bivariate_kde, None,
'data', transfer_options=True,
transfer_parameters=True,
output_type=Polygons))
Compositor.register(Compositor("Sankey", _layout_sankey, None,
'data', transfer_options=True,
transfer_parameters=True,
output_type=Sankey))
DEFAULT_CYCLE = ['#30a2da', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#17becf',
'#9467bd', '#d62728', '#1f77b4', '#e377c2', '#8c564b', '#bcbd22']
Cycle.default_cycles['default_colors'] = DEFAULT_CYCLE
def public(obj):
if not isinstance(obj, type): return False
is_plot_or_cycle = any([issubclass(obj, bc) for bc in [Plot, Cycle]])
is_renderer = any([issubclass(obj, bc) for bc in [Renderer]])
return (is_plot_or_cycle or is_renderer)
_public = list(set([_k for _k, _v in locals().items() if public(_v)]))
__all__ = _public
| 42.244444
| 82
| 0.62546
|
047e68ad5cb935ec0345c4b9fd7a882a2e33be37
| 9,998
|
py
|
Python
|
DEBES_Hugo_code.py
|
hugodebes/Genetic_Algorithm
|
c3c1f205e64692abcbc44c346b6679e99aa324a6
|
[
"Apache-2.0"
] | 1
|
2021-10-11T17:06:28.000Z
|
2021-10-11T17:06:28.000Z
|
DEBES_Hugo_code.py
|
hugodebes/Genetic_Algorithm
|
c3c1f205e64692abcbc44c346b6679e99aa324a6
|
[
"Apache-2.0"
] | null | null | null |
DEBES_Hugo_code.py
|
hugodebes/Genetic_Algorithm
|
c3c1f205e64692abcbc44c346b6679e99aa324a6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Datascience et IA : Approximation d'une fonction (class version).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1e7p76bxHUFlbqSbzTEf_JsCklhRglbr8
1)
Import all the libraries and modules we will need to run the code
"""
from math import cos,pi
import random
import pandas as pd
import numpy as np
"""2) Connectons notre Notebook Google Colab avec mon Google Drive pour avoir accès à mes documents (+vérification)"""
# Commented out IPython magic to ensure Python compatibility.
try:
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
COLAB = True
print("Note: using Google CoLab")
# %tensorflow_version 2.x
except:
print("Note: not using Google CoLab")
COLAB = False
"""3)Chargeons notre dataset de points"""
temperature_sample = '/content/drive/MyDrive/ESILV/temperature2.csv'
#temperature_sample = '/content/drive/MyDrive/ESILV/temperature_sample.csv'
data = pd.read_csv(temperature_sample,sep=';')
data=data.rename(columns={'#i':'i'})
data.head()
"""4)Création de la classe individu du problème avec 3 paramètres (a,b,c)"""
class individu:
def __init__(self,a=None,b=None,c=None):
if (a==None or a<=0 or a>=1):
self.a = round(random.uniform(0.01,0.99),3)
else:
self.a=a
if (b==None or b>20 or b<1):
self.b = random.randint(1,20)
else:
self.b=b
if (c==None or c>20 or c<1):
self.c = random.randint(1,20)
else:
self.c=c
self.distance_reel = self.fitness(data)
def affichage(self):
print('Cet individu représente une fonction de Weierstrass de paramètres : a = {} , b = {} , c = {}'.format(self.a,self.b,self.c))
def __str__(self):
return f' a = {self.a} , b = {self.b} , c = {self.c}'
def weierstrass(self,i):
result=0
for n in range(self.c+1):
ite = (self.a**n)*(cos((self.b**n)*pi*i))
result = result+ite
return result
    def distance(self,index,data): # absolute value of the gap between the predicted value and the real one
return abs((self.weierstrass(data.iloc[index][0])-data.iloc[index][1]))
    def fitness(self,data): # sum of the distances (cost function)
distance_totale=0
for index in range(len(data)-1):
distance_totale += self.distance(index,data)
return distance_totale
def __setitem__(self,a=None,b=None,c=None):
if (b!=None):
self.b=b
if (a!=None):
self.a=a
if (c!=None):
self.c=c
def __getitem__(self,val):
        result = None
        if isinstance(val,str):
if(val=="a"):
result = self.a
if(val=="b"):
result = self.b
if(val=="c"):
result = self.c
else:
result=None
return result
def __eq__(self,ind2):
result=False
if (self.a==ind2.a and self.b==ind2.b and self.c==ind2.c):
result=True
return result
    def __lt__(self,ind2):
        # order individuals by fitness (a lower distance to the data is better)
        return self.fitness(data) < ind2.fitness(data)
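# What an individual encodes (the math implemented by weierstrass() and fitness() above):
#   W_{a,b,c}(x) = sum_{n=0}^{c} a**n * cos(b**n * pi * x),   with 0 < a < 1 and b, c integers in [1, 20]
#   fitness(a,b,c) = sum_i | W_{a,b,c}(x_i) - y_i |   over the (x_i, y_i) rows of the loaded dataset
# A lower fitness means the candidate (a, b, c) reproduces the sampled curve more closely.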
"""5) Création de notre population composée de n_pop individus + Affichage"""
def generationPopulation(n_pop):
if (n_pop<0):
n_pop=10
pop=[]
for i in range(n_pop):
pop.append(individu())
return pop
def affichagePopulation(pop):
for i in range(len(pop)):
print(pop[i])
"""6) Fonction d'évaluation et de sélection de notre population"""
def evaluatebis(pop):
return sorted(pop,key=lambda individu : individu.distance_reel)
def selectionbis(pop,n_best,n_worst):
return pop[:n_best]+pop[len(pop)-n_worst:]
"""7) Fonction de croisement entre 2 individus "parents" donnant 2 individus "fils"
* Moyenne/Changement de décimale pour a
* Inversion pour b et c
"""
def croisementbis(c1,c2):
c3=individu(round(((c1.a+c2.a)/2),3),c1.b,c2.c)
new_a = eval(str(c1.a)[:-2]+str(c1.a)[-1]+str(c1.a)[-2])
if (new_a>1):
new_a = round(random.uniform(0.01,0.99),3)
c4=individu(new_a,c2.b,c1.c)
return c3,c4
"""8) Fonction de mutation d'un individu
Paramètre aléatoire incrémentant ou décrémentant les valeurs d'un individu
"""
def mutationbis(c1):
a=random.randrange(2)
result=individu()
if (a==1):
result = individu(round((c1.a-0.05),3),c1.b+1,c1.c-1)
if (a==0):
result = individu(round((c1.a+0.05),3),c1.b-1,c1.c+1)
if (result.b<1 or result.b>20):
result.b=random.randint(1,20)
if (result.c<1 or result.c>20):
result.c=random.randint(1,20)
if (result.a<0 or result.a>1):
result.a=round(random.uniform(0,1),3)
return result
"""9) Pour tous les individus de la population nous appliquons les opérations de croisement et de mutation"""
def croisement_pop(pop):
new_gen=[]
if (len(pop)%2!=0):
pop.append(individu())
for i in range(0,len(pop),2):
new_ind1, new_ind2 = croisementbis(pop[i],pop[i+1])
new_gen.append(new_ind1)
new_gen.append(new_ind2)
return new_gen
def mutation_pop(pop_select):
mutants=[]
for i in range(len(pop_select)):
mutants.append(mutationbis(pop_select[i]))
return mutants
"""10) Fonction de vérification et d'arrêt de la boucle finale """
def verif(pop,stop,precision):
print(pop[0]," a la meilleure fitness soit ",pop[0].fitness(data))
if (pop[0].fitness(data)<precision):
stop=True
print("Win !")
return stop
def stop(ite,n_epoch,pop,cond_stop_performance):
cond_stop=False
if (ite>=n_epoch or len(pop)<=2 or cond_stop_performance==True ):
cond_stop=True
return cond_stop
"""11) Boucle finale du Problème"""
def boucle_finalv2(precision):
    n_epoch=20 # maximum number of generations
    n_pop=10 # number of individuals in the starting population
    population = generationPopulation(n_pop)
    coeff_best=0.35 # percentage of best elements selected
    coeff_worst=0.15 # percentage of worst elements selected
    ite=0 # current generation
    cond_stop_performance=False # stop the program if the best individual exceeds our performance target
    cond_stop=stop(ite,n_epoch,population,cond_stop_performance)
    stock=[] # list of the best individual of each generation
    win=False
    stuck = 1 if precision*3<=1 else precision*3 # the program is considered stuck in a local minimum if the fitness stays above this value
while (cond_stop == False):
print("Generation n° ",ite+1)
ite+=1
cond_stop=stop(ite,n_epoch,population,cond_stop_performance)
        eval_pop = evaluatebis(population) # evaluation of the current population
stock.append(eval_pop[0].distance_reel)
cond_stop_performance = verif(eval_pop,cond_stop_performance,precision)
        if (cond_stop_performance==True): # end of the program if the performance target is exceeded
result=eval_pop[0]
cond_stop=True
win=True
break
else:
            selection_pop = selectionbis(eval_pop,int(coeff_best*len(eval_pop)),int(coeff_worst*len(eval_pop))) # selection
            random.shuffle(selection_pop) # shuffle our individuals whether they have a good or a bad fitness
            croises = croisement_pop(selection_pop) # crossover
            mutes = mutation_pop(selection_pop) # mutation
new_ind=[]
            for i in range(5): # add new random individuals to bring diversity (avoids elitism)
new_ind.append(individu())
            population = croises[:]+mutes[:]+new_ind[:] # new population for the next generation
            if (len(population)>30): # our population grows slightly, drop a few elements (keeps the algorithm fast)
                population = population[:30]
            if (len(stock)>=7 and np.mean(stock[6:])>stuck): # if the mean fitness of the best elements stays above stuck,
                population=generationPopulation(n_pop) # we consider ourselves in a local minimum and regenerate a population
stock[:]=[]
print("Regénération de la population...")
    if (cond_stop_performance==False): # if nothing beats the target, settle for the current best
result=eval_pop[0]
print("Lose...")
    return result,win # return the individual and whether the performance condition was met
"""12) Comparatif d'un individu avec toutes les valeurs de notre dataset + écart """
def comparatif(best,dataset):
print("Fitness globale :",best.fitness(data))
for i in range(len(dataset)-1):
print("i : ",dataset.iloc[i][0])
print("Résultat trouvé: " , best.weierstrass(dataset.iloc[i][0]))
print("Réalité :",dataset.iloc[i][1])
print("Ecart :",abs((best.weierstrass(dataset.iloc[i][0])-dataset.iloc[i][1])/dataset.iloc[i][1]))
"""13) Résultat et temps d'execution"""
# Commented out IPython magic to ensure Python compatibility.
best,win = boucle_finalv2(0.2)
print("Best:",best)
print("Fitness:",best.distance_reel)
"""14) Main du programme
(A considérer si on a aucune idée de la précision que l'on peut atteindre avec un set de données)
1. On considère une précision de départ
2. Le but de cette boucle est d'utiliser la boucle_final() et de diminuer la précision recherchée si celle-ci a été rempli précédemment
3. Si la boucle_final() ne bat pas la précision 3 fois d'affilée on arrète la recherche
"""
if __name__=="__main__":
    precision=2 # starting precision
    stop_def=False # definitive stop of the program
stop_compteur=0
winner_list=[]
while (stop_def==False):
result,win=boucle_finalv2(precision)
        if (win==True): # precision beaten, we can keep minimizing our fitness function
precision -=0.2
stop_compteur=0
winner_list.append(result)
else:
stop_compteur+=1
            if (result.distance_reel <evaluatebis(winner_list)[0].distance_reel): # case where we do not beat the precision but get closer to it
winner_list.append(result)
            if (stop_compteur==3): # three loops without improvement, we stop the search
stop_def=True
print("Precision atteinte :",precision )
print("Best : ",evaluatebis(winner_list)[0])
print("Fitness : ",evaluatebis(winner_list)[0].distance_reel)
| 35.835125
| 137
| 0.693539
|
bcf34071e57d364a2cb106585cb480696e575c2a
| 20,790
|
py
|
Python
|
pikuli/uia/adapter/patterns_plain_description.py
|
NVoronchev/pikuli
|
b67e33fa51a7bb7252c5ac11651e2f005542f955
|
[
"MIT"
] | null | null | null |
pikuli/uia/adapter/patterns_plain_description.py
|
NVoronchev/pikuli
|
b67e33fa51a7bb7252c5ac11651e2f005542f955
|
[
"MIT"
] | 1
|
2021-05-11T13:40:52.000Z
|
2021-05-13T19:42:26.000Z
|
pikuli/uia/adapter/patterns_plain_description.py
|
NVoronchev/pikuli
|
b67e33fa51a7bb7252c5ac11651e2f005542f955
|
[
"MIT"
] | 2
|
2021-03-31T14:10:15.000Z
|
2022-01-24T02:16:04.000Z
|
# -*- coding: utf-8 -*-
PROPERTY = 'property'
METHOD = 'method'
patterns_plain_description = {
"AnnotationPattern" : [
(PROPERTY, 'CurrentAnnotationTypeId',
( 'out', 'c_int', 'retVal' )),
(PROPERTY, 'CurrentAnnotationTypeName',
( 'out', 'BSTR', 'retVal' )),
(PROPERTY, 'CurrentAuthor',
( 'out', 'BSTR', 'retVal' )),
(PROPERTY, 'CurrentDateTime',
( 'out', 'BSTR', 'retVal' )),
(PROPERTY, 'CurrentTarget',
( 'out', 'POINTER(IUIAutomationElement)', 'retVal' )),
# (PROPERTY, 'CachedAnnotationTypeId',
# ( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedAnnotationTypeName',
# ( 'out', 'BSTR', 'retVal' )),
# (PROPERTY, 'CachedAuthor',
# ( 'out', 'BSTR', 'retVal' )),
# (PROPERTY, 'CachedDateTime',
# ( 'out', 'BSTR', 'retVal' )),
# (PROPERTY, 'CachedTarget',
# ( 'out', 'POINTER(IUIAutomationElement)', 'retVal' )),
],
"DockPattern" : [
(METHOD, 'SetDockPosition',
( 'in', 'DockPosition', 'dockPos' )),
(PROPERTY, 'CurrentDockPosition',
( 'out', 'DockPosition', 'retVal' )),
# (PROPERTY, 'CachedDockPosition',
# ( 'out', 'DockPosition', 'retVal' )),
],
"DragPattern" : [
(PROPERTY, 'CurrentIsGrabbed',
( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedIsGrabbed',
# ( 'out', 'c_int', 'retVal' )),
(PROPERTY, 'CurrentDropEffect',
( 'out', 'BSTR', 'retVal' )),
# (PROPERTY, 'CachedDropEffect',
# ( 'out', 'BSTR', 'retVal' )),
(PROPERTY, 'CurrentDropEffects',
( 'out', '_midlSAFEARRAY(BSTR)', 'retVal' )),
# (PROPERTY, 'CachedDropEffects',
# ( 'out', '_midlSAFEARRAY(BSTR)', 'retVal' )),
(METHOD, 'GetCurrentGrabbedItems',
( 'out', 'POINTER(IUIAutomationElementArray)', 'retVal' )),
# (METHOD, 'GetCachedGrabbedItems',
# ( 'out', 'POINTER(IUIAutomationElementArray)', 'retVal' )),
],
"DropTargetPattern" : [
(PROPERTY, 'CurrentDropTargetEffect',
( 'out', 'BSTR', 'retVal' )),
# (PROPERTY, 'CachedDropTargetEffect',
# ( 'out', 'BSTR', 'retVal' )),
(PROPERTY, 'CurrentDropTargetEffects',
( 'out', '_midlSAFEARRAY(BSTR)', 'retVal' )),
# (PROPERTY, 'CachedDropTargetEffects',
# ( 'out', '_midlSAFEARRAY(BSTR)', 'retVal' )),
],
"ExpandCollapsePattern" : [
(METHOD, 'Expand'),
(METHOD, 'Collapse'),
(PROPERTY, 'CurrentExpandCollapseState',
( 'out', 'POINTER(ExpandCollapseState)', 'retVal' )),
# (PROPERTY, 'CachedExpandCollapseState',
# ( 'out', 'POINTER(ExpandCollapseState)', 'retVal' )),
],
"GridItemPattern" : [
(PROPERTY, 'CurrentContainingGrid',
( 'out', 'POINTER(IUIAutomationElement)', 'retVal' )),
(PROPERTY, 'CurrentRow',
( 'out', 'c_int', 'retVal' )),
(PROPERTY, 'CurrentColumn',
( 'out', 'c_int', 'retVal' )),
(PROPERTY, 'CurrentRowSpan',
( 'out', 'c_int', 'retVal' )),
(PROPERTY, 'CurrentColumnSpan',
( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedContainingGrid',
# ( 'out', 'POINTER(IUIAutomationElement)', 'retVal' )),
# (PROPERTY, 'CachedRow',
# ( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedColumn',
# ( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedRowSpan',
# ( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedColumnSpan',
# ( 'out', 'c_int', 'retVal' )),
],
"GridPattern" : [
(METHOD, 'GetItem',
( 'in', 'c_int', 'row' ),
( 'in', 'c_int', 'column' ),
( 'out', 'POINTER(IUIAutomationElement)', 'element' )),
(PROPERTY, 'CurrentRowCount',
( 'out', 'c_int', 'retVal' )),
(PROPERTY, 'CurrentColumnCount',
( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedRowCount',
# ( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedColumnCount',
# ( 'out', 'c_int', 'retVal' )),
],
"InvokePattern" : [
(METHOD, 'Invoke'),
],
"ItemContainerPattern" : [
(METHOD, 'FindItemByProperty',
( 'in', 'POINTER(IUIAutomationElement)', 'pStartAfter' ),
( 'in', 'c_int', 'propertyId' ),
( 'in', 'VARIANT', 'value' ),
( 'out', 'POINTER(IUIAutomationElement)', 'pFound' )),
],
"LegacyIAccessiblePattern" : [
(METHOD, 'Select',
( 'in', 'c_int', 'flagsSelect' )),
(METHOD, 'DoDefaultAction'),
(METHOD, 'SetValue',
( 'in', 'WSTRING', 'szValue' )),
(PROPERTY, 'CurrentChildId',
( 'out', 'c_int', 'pRetVal' )),
(PROPERTY, 'CurrentName',
( 'out', 'BSTR', 'pszName' )),
(PROPERTY, 'CurrentValue',
( 'out', 'BSTR', 'pszValue' )),
(PROPERTY, 'CurrentDescription',
( 'out', 'BSTR', 'pszDescription' )),
(PROPERTY, 'CurrentRole',
( 'out', 'c_ulong', 'pdwRole' )),
(PROPERTY, 'CurrentState',
( 'out', 'c_ulong', 'pdwState' )),
(PROPERTY, 'CurrentHelp',
( 'out', 'BSTR', 'pszHelp' )),
(PROPERTY, 'CurrentKeyboardShortcut',
( 'out', 'BSTR', 'pszKeyboardShortcut' )),
(METHOD, 'GetCurrentSelection',
( 'out', 'POINTER(IUIAutomationElementArray)', 'pvarSelectedChildren' )),
(PROPERTY, 'CurrentDefaultAction',
( 'out', 'BSTR', 'pszDefaultAction' )),
# (PROPERTY, 'CachedChildId',
# ( 'out', 'c_int', 'pRetVal' )),
# (PROPERTY, 'CachedName',
# ( 'out', 'BSTR', 'pszName' )),
# (PROPERTY, 'CachedValue',
# ( 'out', 'BSTR', 'pszValue' )),
# (PROPERTY, 'CachedDescription',
# ( 'out', 'BSTR', 'pszDescription' )),
# (PROPERTY, 'CachedRole',
# ( 'out', 'c_ulong', 'pdwRole' )),
# (PROPERTY, 'CachedState',
# ( 'out', 'c_ulong', 'pdwState' )),
# (PROPERTY, 'CachedHelp',
# ( 'out', 'BSTR', 'pszHelp' )),
# (PROPERTY, 'CachedKeyboardShortcut',
# ( 'out', 'BSTR', 'pszKeyboardShortcut' )),
# (METHOD, 'GetCachedSelection',
# ( 'out', 'POINTER(IUIAutomationElementArray)', 'pvarSelectedChildren' )),
# (PROPERTY, 'CachedDefaultAction',
# ( 'out', 'BSTR', 'pszDefaultAction' )),
# (METHOD, 'GetIAccessible',
# ( 'out', 'POINTER(IAccessible)', 'ppAccessible' )),
],
"MultipleViewPattern" : [
(METHOD, 'GetViewName',
( 'in', 'c_int', 'view' ),
( 'out', 'BSTR', 'name' )),
(METHOD, 'SetCurrentView',
( 'in', 'c_int', 'view' )),
(PROPERTY, 'CurrentCurrentView',
( 'out', 'c_int', 'retVal' )),
(METHOD, 'GetCurrentSupportedViews',
( 'out', '_midlSAFEARRAY(c_int)', 'retVal' )),
# (PROPERTY, 'CachedCurrentView',
# ( 'out', 'c_int', 'retVal' )),
# (METHOD, 'GetCachedSupportedViews',
# ( 'out', '_midlSAFEARRAY(c_int)', 'retVal' )),
],
"ObjectModelPattern" : [
(METHOD, 'GetUnderlyingObjectModel',
( 'out', 'POINTER(IUnknown)', 'retVal' )),
],
"RangeValuePattern" : [
(METHOD, 'SetValue',
( 'in', 'c_double', 'val' )),
(PROPERTY, 'CurrentValue',
( 'out', 'c_double', 'retVal' )),
(PROPERTY, 'CurrentIsReadOnly',
( 'out', 'c_int', 'retVal' )),
(PROPERTY, 'CurrentMaximum',
( 'out', 'c_double', 'retVal' )),
(PROPERTY, 'CurrentMinimum',
( 'out', 'c_double', 'retVal' )),
(PROPERTY, 'CurrentLargeChange',
( 'out', 'c_double', 'retVal' )),
(PROPERTY, 'CurrentSmallChange',
( 'out', 'c_double', 'retVal' )),
# (PROPERTY, 'CachedValue',
# ( 'out', 'c_double', 'retVal' )),
# (PROPERTY, 'CachedIsReadOnly',
# ( 'out', 'c_int', 'retVal' )),
# (PROPERTY, 'CachedMaximum',
# ( 'out', 'c_double', 'retVal' )),
# (PROPERTY, 'CachedMinimum',
# ( 'out', 'c_double', 'retVal' )),
# (PROPERTY, 'CachedLargeChange',
# ( 'out', 'c_double', 'retVal' )),
# (PROPERTY, 'CachedSmallChange',
# ( 'out', 'c_double', 'retVal' )),
],
"ScrollItemPattern" : [
(METHOD, 'ScrollIntoView'),
],
"ScrollPattern" : [
(METHOD, 'Scroll',
( 'in', 'ScrollAmount', 'horizontalAmount' ),
( 'in', 'ScrollAmount', 'verticalAmount' )),
(METHOD, 'SetScrollPercent',
( 'in', 'c_double', 'horizontalPercent' ),
( 'in', 'c_double', 'verticalPercent' )),
(PROPERTY, 'CurrentHorizontalScrollPercent',
( 'out', 'c_double', 'retVal' )),
(PROPERTY, 'CurrentVerticalScrollPercent',
( 'out', 'c_double', 'retVal' )),
(PROPERTY, 'CurrentHorizontalViewSize',
( 'out', 'POINTER(c_double)', 'retVal' )),
(PROPERTY, 'CurrentVerticalViewSize',
( 'out', 'POINTER(c_double)', 'retVal' )),
(PROPERTY, 'CurrentHorizontallyScrollable',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentVerticallyScrollable',
( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedHorizontalScrollPercent',
# ( 'out', 'POINTER(c_double)', 'retVal' )),
# (PROPERTY, 'CachedVerticalScrollPercent',
# ( 'out', 'POINTER(c_double)', 'retVal' )),
# (PROPERTY, 'CachedHorizontalViewSize',
# ( 'out', 'POINTER(c_double)', 'retVal' )),
# (PROPERTY, 'CachedVerticalViewSize',
# ( 'out', 'POINTER(c_double)', 'retVal' )),
# (PROPERTY, 'CachedHorizontallyScrollable',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedVerticallyScrollable',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
],
"SelectionItemPattern" : [
(METHOD, 'Select'),
(METHOD, 'AddToSelection'),
(METHOD, 'RemoveFromSelection'),
(PROPERTY, 'CurrentIsSelected',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentSelectionContainer',
( 'out', 'POINTER(POINTER(IUIAutomationElement))', 'retVal' )),
# (PROPERTY, 'CachedIsSelected',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedSelectionContainer',
# ( 'out', 'POINTER(POINTER(IUIAutomationElement))', 'retVal' )),
],
"SelectionPattern" : [
(METHOD, 'GetCurrentSelection',
( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
(PROPERTY, 'CurrentCanSelectMultiple',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentIsSelectionRequired',
( 'out', 'POINTER(c_int)', 'retVal' )),
# (METHOD, 'GetCachedSelection',
# ( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
# (PROPERTY, 'CachedCanSelectMultiple',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedIsSelectionRequired',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
],
"SpreadsheetPattern" : [
(METHOD, 'GetItemByName',
( 'in', 'BSTR', 'name' ),
( 'out', 'POINTER(POINTER(IUIAutomationElement))', 'element' )),
],
"SpreadsheetItemPattern" : [
(PROPERTY, 'CurrentFormula',
( 'out', 'POINTER(BSTR)', 'retVal' )),
(METHOD, 'GetCurrentAnnotationObjects',
( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
(METHOD, 'GetCurrentAnnotationTypes',
( 'out', 'POINTER(_midlSAFEARRAY(c_int))', 'retVal' )),
# (PROPERTY, 'CachedFormula',
# ( 'out', 'POINTER(BSTR)', 'retVal' )),
# (METHOD, 'GetCachedAnnotationObjects',
# ( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
# (METHOD, 'GetCachedAnnotationTypes',
# ( 'out', 'POINTER(_midlSAFEARRAY(c_int))', 'retVal' )),
],
"StylesPattern" : [
(PROPERTY, 'CurrentStyleId',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentStyleName',
( 'out', 'POINTER(BSTR)', 'retVal' )),
(PROPERTY, 'CurrentFillColor',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentFillPatternStyle',
( 'out', 'POINTER(BSTR)', 'retVal' )),
(PROPERTY, 'CurrentShape',
( 'out', 'POINTER(BSTR)', 'retVal' )),
(PROPERTY, 'CurrentFillPatternColor',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentExtendedProperties',
( 'out', 'POINTER(BSTR)', 'retVal' )),
(METHOD, 'GetCurrentExtendedPropertiesAsArray',
( 'out', 'POINTER(POINTER(ExtendedProperty))', 'propertyArray' ),
( 'out', 'POINTER(c_int)', 'propertyCount' )),
# (PROPERTY, 'CachedStyleId',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedStyleName',
# ( 'out', 'POINTER(BSTR)', 'retVal' )),
# (PROPERTY, 'CachedFillColor',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedFillPatternStyle',
# ( 'out', 'POINTER(BSTR)', 'retVal' )),
# (PROPERTY, 'CachedShape',
# ( 'out', 'POINTER(BSTR)', 'retVal' )),
# (PROPERTY, 'CachedFillPatternColor',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedExtendedProperties',
# ( 'out', 'POINTER(BSTR)', 'retVal' )),
# (METHOD, 'GetCachedExtendedPropertiesAsArray',
# ( 'out', 'POINTER(POINTER(ExtendedProperty))', 'propertyArray' ),
# ( 'out', 'POINTER(c_int)', 'propertyCount' )),
],
"SynchronizedInputPattern" : [
(METHOD, 'StartListening',
( 'in', 'SynchronizedInputType', 'inputType' )),
(METHOD, 'Cancel'),
],
"TableItemPattern" : [
(METHOD, 'GetCurrentRowHeaderItems',
( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
(METHOD, 'GetCurrentColumnHeaderItems',
( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
# (METHOD, 'GetCachedRowHeaderItems',
# ( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
# (METHOD, 'GetCachedColumnHeaderItems',
# ( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
],
"TablePattern" : [
(METHOD, 'GetCurrentRowHeaders',
( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
(METHOD, 'GetCurrentColumnHeaders',
( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
(PROPERTY, 'CurrentRowOrColumnMajor',
( 'out', 'POINTER(RowOrColumnMajor)', 'retVal' )),
# (METHOD, 'GetCachedRowHeaders',
# ( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
# (METHOD, 'GetCachedColumnHeaders',
# ( 'out', 'POINTER(POINTER(IUIAutomationElementArray))', 'retVal' )),
# (PROPERTY, 'CachedRowOrColumnMajor',
# ( 'out', 'POINTER(RowOrColumnMajor)', 'retVal' )),
],
"TextChildPattern" : [
(PROPERTY, 'TextContainer',
( 'out', 'POINTER(POINTER(IUIAutomationElement))', 'container' )),
(PROPERTY, 'TextRange',
( 'out', 'POINTER(POINTER(IUIAutomationTextRange))', 'range' )),
],
"TextEditPattern" : None,
"TextPattern" : [
(METHOD, 'RangeFromPoint',
( 'in', 'tagPOINT', 'pt' ),
( 'out', 'POINTER(POINTER(IUIAutomationTextRange))', 'range' )),
(METHOD, 'RangeFromChild',
( 'in', 'POINTER(IUIAutomationElement)', 'child' ),
( 'out', 'POINTER(POINTER(IUIAutomationTextRange))', 'range' )),
(METHOD, 'GetSelection',
( 'out', 'POINTER(POINTER(IUIAutomationTextRangeArray))', 'ranges' )),
(METHOD, 'GetVisibleRanges',
( 'out', 'POINTER(POINTER(IUIAutomationTextRangeArray))', 'ranges' )),
(PROPERTY, 'DocumentRange',
( 'out', 'POINTER(POINTER(IUIAutomationTextRange))', 'range' )),
(PROPERTY, 'SupportedTextSelection',
( 'out', 'POINTER(SupportedTextSelection)', 'SupportedTextSelection' )),
],
"TextPattern2" : [
(METHOD, 'RangeFromAnnotation',
( 'in', 'POINTER(IUIAutomationElement)', 'annotation' ),
( 'out', 'POINTER(POINTER(IUIAutomationTextRange))', 'range' )),
(METHOD, 'GetCaretRange',
( 'out', 'POINTER(c_int)', 'isActive' ),
( 'out', 'POINTER(POINTER(IUIAutomationTextRange))', 'range' )),
],
"TogglePattern" : [
(METHOD, 'Toggle'),
(PROPERTY, 'CurrentToggleState',
( 'out', 'POINTER(ToggleState)', 'retVal' )),
# (PROPERTY, 'CachedToggleState',
# ( 'out', 'POINTER(ToggleState)', 'retVal' )),
],
"TransformPattern" : [
(METHOD, 'Move',
( 'in', 'c_double', 'x' ),
( 'in', 'c_double', 'y' )),
(METHOD, 'Resize',
( 'in', 'c_double', 'width' ),
( 'in', 'c_double', 'height' )),
(METHOD, 'Rotate',
( 'in', 'c_double', 'degrees' )),
(PROPERTY, 'CurrentCanMove',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentCanResize',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentCanRotate',
( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedCanMove',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedCanResize',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedCanRotate',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
],
"TransformPattern2" : [
(METHOD, 'Zoom',
( 'in', 'c_double', 'Zoom' )),
(METHOD, 'ZoomByUnit',
( 'in', 'ZoomUnit', 'ZoomUnit' )),
(PROPERTY, 'CurrentCanZoom',
( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedCanZoom',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentZoomLevel',
( 'out', 'POINTER(c_double)', 'retVal' )),
# (PROPERTY, 'CachedZoomLevel',
# ( 'out', 'POINTER(c_double)', 'retVal' )),
(PROPERTY, 'CurrentZoomMinimum',
( 'out', 'POINTER(c_double)', 'retVal' )),
# (PROPERTY, 'CachedZoomMinimum',
# ( 'out', 'POINTER(c_double)', 'retVal' )),
(PROPERTY, 'CurrentZoomMaximum',
( 'out', 'POINTER(c_double)', 'retVal' )),
# (PROPERTY, 'CachedZoomMaximum',
# ( 'out', 'POINTER(c_double)', 'retVal' )),
],
"ValuePattern" : [
(METHOD, 'SetValue',
( 'in', 'POINTER(BSTR)', 'val' )),
(PROPERTY, 'CurrentValue',
( 'out', 'POINTER(BSTR)', 'retVal' )),
(PROPERTY, 'CurrentIsReadOnly',
( 'out', 'POINTER(c_int)', 'retVal' )),
],
"VirtualizedItemPattern" : [
(METHOD, 'Realize'),
],
"WindowPattern" : [
(METHOD, 'Close'),
(METHOD, 'WaitForInputIdle',
( 'in', 'c_int', 'milliseconds' ),
( 'out', 'POINTER(c_int)', 'success' )),
(METHOD, 'SetWindowVisualState',
( 'in', 'WindowVisualState', 'state' )),
(PROPERTY, 'CurrentCanMaximize',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentCanMinimize',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentIsModal',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentIsTopmost',
( 'out', 'POINTER(c_int)', 'retVal' )),
(PROPERTY, 'CurrentWindowVisualState',
( 'out', 'POINTER(WindowVisualState)', 'retVal' )),
(PROPERTY, 'CurrentWindowInteractionState',
( 'out', 'POINTER(WindowInteractionState)', 'retVal' )),
# (PROPERTY, 'CachedCanMaximize',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedCanMinimize',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedIsModal',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedIsTopmost',
# ( 'out', 'POINTER(c_int)', 'retVal' )),
# (PROPERTY, 'CachedWindowVisualState',
# ( 'out', 'POINTER(WindowVisualState)', 'retVal' )),
# (PROPERTY, 'CachedWindowInteractionState',
# ( 'out', 'POINTER(WindowInteractionState)', 'retVal' )),
],
}
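# Minimal sketch of reading this description table (illustration only; the real consumer
# is pikuli's UIA adapter, whose exact API is not shown in this file):
if __name__ == '__main__':
    for pattern_name, members in patterns_plain_description.items():
        if members is None:  # e.g. "TextEditPattern" declares no members here
            continue
        methods = [m[1] for m in members if m[0] == METHOD]
        properties = [m[1] for m in members if m[0] == PROPERTY]
        print('{}: {} methods, {} properties'.format(pattern_name, len(methods), len(properties)))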
| 42.602459
| 89
| 0.517845
|
6718b30ab52876ca3009c6bbdadc057f3db1d28b
| 1,349
|
py
|
Python
|
files/plugins/check_xfs_errors.py
|
lampkicking/charm-nrpe
|
29d1cd8701d696f8b2472ea43f7a8639e0b7049d
|
[
"MIT"
] | null | null | null |
files/plugins/check_xfs_errors.py
|
lampkicking/charm-nrpe
|
29d1cd8701d696f8b2472ea43f7a8639e0b7049d
|
[
"MIT"
] | null | null | null |
files/plugins/check_xfs_errors.py
|
lampkicking/charm-nrpe
|
29d1cd8701d696f8b2472ea43f7a8639e0b7049d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2017 Canonical Ltd
#
# Author: Jill Rouleau <jill.rouleau@canonical.com>
#
# Check for xfs errors and alert
#
import sys
import re
from datetime import datetime, timedelta
import subprocess
# error messages commonly seen in dmesg on xfs errors
raw_xfs_errors = ['XFS_WANT_CORRUPTED_',
'xfs_error_report',
'corruption detected at xfs_',
'Unmount and run xfs_repair']
xfs_regex = [re.compile(i) for i in raw_xfs_errors]
# nagios can't read from kern.log, so we look at dmesg - this does present
# a known limitation if a node is rebooted or dmesg is otherwise cleared.
log_lines = [line for line in subprocess.getoutput(['dmesg -T']).split('\n')]
err_results = [line for line in log_lines for rgx in xfs_regex if
re.search(rgx, line)]
# Look for errors within the last N minutes, specified in the check definition
check_delta = int(sys.argv[1])
# dmesg -T formatted timestamps are inside [], so we need to add them
datetime_delta = datetime.now() - timedelta(minutes=check_delta)
recent_logs = [i for i in err_results if datetime.strptime(i[1:25], '%c') >= datetime_delta]
if recent_logs:
    print('CRITICAL: Recent XFS errors in kern.log.'+'\n'+'{}'.format(
recent_logs))
sys.exit(2)
else:
print('OK')
sys.exit(0)
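# --- Hedged usage note (not part of the original check) ---
# The check takes one argument: the look-back window in minutes. A typical
# NRPE command definition might look like the line below; the plugin path and
# the 30-minute window are illustrative assumptions, not taken from this repo.
#   command[check_xfs_errors]=/usr/local/lib/nagios/plugins/check_xfs_errors.py 30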
| 29.326087
| 92
| 0.691623
|
2b366637ae6460a91b1c5abcbe3033e260677e11
| 329
|
py
|
Python
|
HK BOT-10008_2020-12-21_08-19-18.py
|
ClointFusion-Community/CFC-Projects
|
c6381738ade07e6e8979bbae37400ec2b4e626c5
|
[
"MIT"
] | null | null | null |
HK BOT-10008_2020-12-21_08-19-18.py
|
ClointFusion-Community/CFC-Projects
|
c6381738ade07e6e8979bbae37400ec2b4e626c5
|
[
"MIT"
] | null | null | null |
HK BOT-10008_2020-12-21_08-19-18.py
|
ClointFusion-Community/CFC-Projects
|
c6381738ade07e6e8979bbae37400ec2b4e626c5
|
[
"MIT"
] | null | null | null |
import ClointFusion as cf
cf.launch_any_exe_bat_application('notepad')
cf.key_hit_enter()
cf.key_write_enter('HKHR',key='')
cf.key_press('alt+f4')
cf.key_press('n')
| 47
| 89
| 0.389058
|
a510de1be5698140a70f4d4b41b48de8eebc3377
| 1,757
|
py
|
Python
|
var/spack/repos/builtin/packages/libuv/package.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-03-05T10:54:32.000Z
|
2021-03-05T14:14:52.000Z
|
var/spack/repos/builtin/packages/libuv/package.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32
|
2020-12-15T17:29:20.000Z
|
2022-03-21T15:08:31.000Z
|
var/spack/repos/builtin/packages/libuv/package.py
|
Kerilk/spack
|
e027942b55407a4a5fe323b93d8e57200c873a43
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-07-19T20:31:27.000Z
|
2021-07-19T21:14:14.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libuv(AutotoolsPackage):
"""Multi-platform library with a focus on asynchronous IO"""
homepage = "http://libuv.org"
url = "https://github.com/libuv/libuv/archive/v1.9.0.tar.gz"
version('1.41.0', sha256='6cfeb5f4bab271462b4a2cc77d4ecec847fdbdc26b72019c27ae21509e6f94fa')
version('1.40.0', sha256='70fe1c9ba4f2c509e8166c0ca2351000237da573bb6c82092339207a9715ba6b')
version('1.39.0', sha256='dc7b21f1bb7ef19f4b42c5ea058afabe51132d165da18812b70fb319659ba629')
version('1.38.1', sha256='2177fca2426ac60c20f654323656e843dac4f568d46674544b78f416697bd32c')
version('1.25.0', sha256='ce3036d444c3fb4f9a9e2994bec1f4fa07872b01456998b422ce918fdc55c254')
version('1.10.0', sha256='50f4ed57d65af4ab634e2cbdd90c49213020e15b4d77d3631feb633cbba9239f')
version('1.9.0', sha256='f8b8272a0d80138b709d38fad2baf771899eed61e7f9578d17898b07a1a2a5eb')
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
# Tries to build an Objective-C file with GCC's C frontend
# https://github.com/libuv/libuv/issues/2805
conflicts('%gcc platform=darwin', when='@:1.37.9',
msg='libuv does not compile with GCC on macOS yet, use clang. '
'See: https://github.com/libuv/libuv/issues/2805')
def autoreconf(self, spec, prefix):
# This is needed because autogen.sh generates on-the-fly
# an m4 macro needed during configuration
bash = which("bash")
bash('autogen.sh')
| 51.676471
| 96
| 0.73136
|
9d43e2c56e02d169fd7039a51bf07e5022d43088
| 13,584
|
py
|
Python
|
elifetools/rawJATS.py
|
elifesciences/elife-tools
|
ee345bf0e6703ef0f7e718355e85730abbdfd117
|
[
"MIT"
] | 9
|
2015-04-16T08:13:31.000Z
|
2020-05-18T14:03:06.000Z
|
elifetools/rawJATS.py
|
elifesciences/elife-tools
|
ee345bf0e6703ef0f7e718355e85730abbdfd117
|
[
"MIT"
] | 310
|
2015-02-11T00:30:09.000Z
|
2021-07-14T23:58:50.000Z
|
elifetools/rawJATS.py
|
elifesciences/elife-tools
|
ee345bf0e6703ef0f7e718355e85730abbdfd117
|
[
"MIT"
] | 9
|
2015-02-04T01:21:28.000Z
|
2021-06-15T12:50:47.000Z
|
from elifetools.utils import first, firstnn, extract_nodes, node_contents_str
"""
rawJATS.py extracts and returns the nodes from the article xml using BeautifulSoup so that functionality at higher levels may use and combine them as necessary.
"""
def article_meta(soup):
return first(extract_nodes(soup, "article-meta"))
def article_title(soup):
return first(extract_nodes(soup, "article-title"))
def title(soup):
return first(extract_nodes(soup, "title"))
def abstract(soup, abstract_type=None):
if abstract_type:
return extract_nodes(
soup, "abstract", attr="abstract-type", value=abstract_type
)
else:
return extract_nodes(soup, "abstract")
def article_id(soup, pub_id_type=None):
if pub_id_type:
return extract_nodes(soup, "article-id", attr="pub-id-type", value=pub_id_type)
else:
return extract_nodes(soup, "article-id")
def doi(soup):
doi_tags = article_id(soup, pub_id_type="doi")
# the first article-id tag whose parent is article-meta
return first([tag for tag in doi_tags if tag.parent.name == "article-meta"])
def publisher_id(soup):
article_id_tags = article_id(soup, pub_id_type="publisher-id")
# the first article-id tag whose parent is article-meta
return first([tag for tag in article_id_tags if tag.parent.name == "article-meta"])
def journal_id(soup):
# the first non-nil tag
return firstnn(
extract_nodes(soup, "journal-id", attr="journal-id-type", value="publisher-id")
)
def journal_title(soup):
return first(extract_nodes(soup, "journal-title"))
def journal_issn(soup, pub_format, pub_type):
if pub_format is None and pub_type is None:
# return the first issn tag found regardless of which type
return first(extract_nodes(soup, "issn"))
elif pub_format is not None:
return first(
extract_nodes(soup, "issn", attr="publication-format", value=pub_format)
)
elif pub_type is not None:
return first(extract_nodes(soup, "issn", attr="pub-type", value=pub_type))
def publisher(soup):
return first(extract_nodes(soup, "publisher-name"))
def article_type(soup):
# returns raw data, just that the data doesn't contain any BS nodes
return first(extract_nodes(soup, "article")).get("article-type")
def pub_date(soup, date_type=None, pub_type=None):
if date_type is not None:
return extract_nodes(soup, "pub-date", attr="date-type", value=date_type)
elif pub_type is not None:
return extract_nodes(soup, "pub-date", attr="pub-type", value=pub_type)
else:
return extract_nodes(soup, "pub-date")
def date(soup, date_type=None):
if date_type is not None:
return extract_nodes(soup, "date", attr="date-type", value=date_type)
else:
return extract_nodes(soup, "date")
def history_date(soup, date_type):
date_tags = date(soup, date_type)
return first([tag for tag in date_tags if tag.parent.name == "history"])
def day(soup):
return first(extract_nodes(soup, "day"))
def month(soup):
return first(extract_nodes(soup, "month"))
def year(soup):
return first(extract_nodes(soup, "year"))
def keyword_group(soup):
return extract_nodes(soup, "kwd-group")
def acknowledgements(soup):
return first(extract_nodes(soup, "ack"))
def conflict(soup):
conflict_tags = extract_nodes(soup, "fn", attr="fn-type", value="conflict")
conflict_tags += extract_nodes(soup, "fn", attr="fn-type", value="COI-statement")
return conflict_tags
def permissions(soup):
# a better selector might be "article-meta.permissions"
return extract_nodes(soup, "permissions")
def article_permissions(soup):
# a better selector might be "article-meta.permissions"
permissions_tags = permissions(soup)
return first([tag for tag in permissions_tags if tag.parent.name == "article-meta"])
def licence(soup):
return extract_nodes(soup, "license")
def licence_p(soup):
return extract_nodes(soup, "license-p")
def licence_url(soup):
"License url attribute of the license tag"
if licence(soup):
return first(licence(soup)).get("xlink:href")
def attrib(soup):
return extract_nodes(soup, "attrib")
def copyright_statement(soup):
return first(extract_nodes(soup, "copyright-statement"))
def copyright_year(soup):
return first(extract_nodes(soup, "copyright-year"))
def copyright_holder(soup):
return first(extract_nodes(soup, "copyright-holder"))
def funding_statement(soup):
return first(extract_nodes(soup, "funding-statement"))
def affiliation(soup):
return extract_nodes(soup, "aff")
def research_organism_keywords(soup):
tags = first(
extract_nodes(
soup, "kwd-group", attr="kwd-group-type", value="research-organism"
)
)
if not tags:
return None
return [tag for tag in tags if tag.name == "kwd"] or None
def author_keywords(soup):
# A few articles have kwd-group with no kwd-group-type, so account for those
tags = extract_nodes(soup, "kwd-group")
keyword_tags = []
for tag in tags:
if (
tag.get("kwd-group-type") == "author-keywords"
or tag.get("kwd-group-type") is None
):
keyword_tags += [tag for tag in tag if tag.name == "kwd"]
return keyword_tags
def subject_area(soup, subject_group_type=None):
# Supports all subject areas or just particular ones filtered by
subject_area_tags = []
tags = extract_nodes(soup, "subject")
subject_area_tags = [
tag
for tag in tags
if tag.parent.name == "subj-group"
and tag.parent.parent.name == "article-categories"
and tag.parent.parent.parent.name == "article-meta"
]
if subject_group_type:
subject_area_tags = [
tag
for tag in tags
if tag.parent.get("subj-group-type") == subject_group_type
]
return subject_area_tags
def full_subject_area(soup, subject_group_type=None):
subject_group_tags = extract_nodes(soup, "subj-group")
subject_group_tags = [
tag
for tag in subject_group_tags
if tag.parent.name == "article-categories"
and tag.parent.parent.name == "article-meta"
]
if subject_group_type:
        subject_group_tags = [
            tag
            for tag in subject_group_tags
            if tag.get("subj-group-type") == subject_group_type
        ]
return subject_group_tags
def custom_meta(soup, meta_name=None):
custom_meta_tags = extract_nodes(soup, "custom-meta")
if meta_name is not None:
custom_meta_tags = [
tag
for tag in custom_meta_tags
if node_contents_str(first(extract_nodes(tag, "meta-name"))) == meta_name
]
return custom_meta_tags
def display_channel(soup):
return subject_area(soup, subject_group_type="display-channel")
def sub_display_channel(soup):
return subject_area(soup, subject_group_type="sub-display-channel")
def category(soup):
return subject_area(soup, subject_group_type="heading")
def related_article(soup):
related_article_tags = extract_nodes(soup, "related-article")
return [tag for tag in related_article_tags if tag.parent.name == "article-meta"]
def mixed_citations(soup):
return extract_nodes(soup, "mixed-citation")
def related_object(soup):
return extract_nodes(soup, "related-object")
def object_id(soup, pub_id_type):
return extract_nodes(soup, "object-id", attr="pub-id-type", value=pub_id_type)
def pub_history(soup):
return extract_nodes(soup, "pub-history")
def event(soup):
return extract_nodes(soup, "event")
def event_desc(soup):
return extract_nodes(soup, "event-desc")
def label(soup):
return first(extract_nodes(soup, "label"))
def contributors(soup):
return extract_nodes(soup, "contrib")
def article_contributors(soup):
article_meta_tag = article_meta(soup)
if article_meta_tag:
contributor_tags = extract_nodes(article_meta_tag, ["contrib", "on-behalf-of"])
return [tag for tag in contributor_tags if tag.parent.name == "contrib-group"]
def authors(soup, contrib_type="author"):
if contrib_type:
return extract_nodes(soup, "contrib", attr="contrib-type", value=contrib_type)
else:
return extract_nodes(soup, "contrib")
def caption(soup):
return first(extract_nodes(soup, "caption"))
def author_notes(soup):
return first(extract_nodes(soup, "author-notes"))
def corresp(soup):
return extract_nodes(soup, "corresp")
def fn_group(soup, content_type=None):
if content_type:
return extract_nodes(soup, "fn-group", attr="content-type", value=content_type)
else:
return extract_nodes(soup, "fn-group")
def fn(soup):
return extract_nodes(soup, "fn")
def media(soup):
return extract_nodes(soup, "media")
def inline_graphic(soup):
return extract_nodes(soup, "inline-graphic")
def graphic(soup):
return extract_nodes(soup, "graphic")
def self_uri(soup):
return extract_nodes(soup, "self-uri")
def supplementary_material(soup):
return extract_nodes(soup, "supplementary-material")
#
# authors
#
def contrib_id(soup):
return extract_nodes(soup, "contrib-id")
def email(soup):
return extract_nodes(soup, "email")
def phone(soup):
return extract_nodes(soup, "phone")
def bio(soup):
return extract_nodes(soup, "bio")
#
# references
#
def ref_list(soup):
return extract_nodes(soup, "ref")
def volume(soup):
return extract_nodes(soup, "volume")
def issue(soup):
return extract_nodes(soup, "issue")
def elocation_id(soup):
return extract_nodes(soup, "elocation-id")
def fpage(soup):
return extract_nodes(soup, "fpage")
def lpage(soup):
return extract_nodes(soup, "lpage")
def collab(soup):
return extract_nodes(soup, "collab")
def publisher_loc(soup):
return extract_nodes(soup, "publisher-loc")
def publisher_name(soup):
return extract_nodes(soup, "publisher-name")
def comment(soup):
return extract_nodes(soup, "comment")
def element_citation(soup):
return extract_nodes(soup, "element-citation")
def etal(soup):
return extract_nodes(soup, "etal")
def pub_id(soup, pub_id_type=None):
if pub_id_type:
return extract_nodes(soup, "pub-id", attr="pub-id-type", value=pub_id_type)
else:
return extract_nodes(soup, "pub-id")
def source(soup):
return extract_nodes(soup, "source")
def person_group(soup):
return extract_nodes(soup, "person-group")
def surname(soup):
return extract_nodes(soup, "surname")
def given_names(soup):
return extract_nodes(soup, "given-names")
def suffix(soup):
return extract_nodes(soup, "suffix")
def ext_link(soup, ext_link_type=None):
if ext_link_type:
return extract_nodes(
soup, "ext-link", attr="ext-link-type", value=ext_link_type
)
else:
return extract_nodes(soup, "ext-link")
def uri(soup):
return extract_nodes(soup, "uri")
def edition(soup):
return extract_nodes(soup, "edition")
def version(soup):
return extract_nodes(soup, "version")
def chapter_title(soup):
return extract_nodes(soup, "chapter-title")
def data_title(soup):
return extract_nodes(soup, "data-title")
def conf_name(soup):
return extract_nodes(soup, "conf-name")
def date_in_citation(soup):
return extract_nodes(soup, "date-in-citation")
def patent(soup):
return extract_nodes(soup, "patent")
#
# back
#
def back(soup):
return first(extract_nodes(soup, "back"))
def app_group(soup):
return extract_nodes(soup, "app-group")
def app(soup):
return extract_nodes(soup, "app")
#
# body
#
def body(soup):
return extract_nodes(soup, "body")
def article_body(soup):
return first(extract_nodes(soup, "body"))
def sub_article(soup, article_type=None):
return extract_nodes(soup, "sub-article", attr="article-type", value=article_type)
def editor_evaluation(soup):
return first(sub_article(soup, "editor-report"))
def decision_letter(soup):
tag = first(sub_article(soup, "article-commentary"))
if not tag:
tag = first(sub_article(soup, "decision-letter"))
return tag
def author_response(soup):
return first(sub_article(soup, "reply"))
def section(soup, sec_type=None):
if sec_type:
return extract_nodes(soup, "sec", attr="sec-type", value=sec_type)
else:
return extract_nodes(soup, "sec")
def paragraph(soup):
return extract_nodes(soup, "p")
def table(soup):
return extract_nodes(soup, "table")
def table_wrap_foot(soup):
return extract_nodes(soup, "table-wrap-foot")
def disp_formula(soup):
return extract_nodes(soup, "disp-formula")
def math(soup):
return extract_nodes(soup, "math")
def boxed_text(soup):
return extract_nodes(soup, "boxed-text")
def fig(soup):
return extract_nodes(soup, "fig")
def fig_group(soup):
return extract_nodes(soup, "fig-group")
def list(soup): # Redefining `list` could be problematic
return extract_nodes(soup, "list")
def list_item(soup):
return extract_nodes(soup, "list-item")
#
# funding
#
def funding_group(soup):
return extract_nodes(soup, "funding-group")
def award_group(soup):
return extract_nodes(soup, "award-group")
def principal_award_recipient(soup):
return extract_nodes(soup, "principal-award-recipient")
def string_name(soup):
return extract_nodes(soup, "string-name")
| 22.123779
| 163
| 0.688678
|
1a4022e05f5d27c54bf3bf7048c362eb093a89fc
| 1,233
|
py
|
Python
|
google/ads/googleads/v8/services/types/domain_category_service.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 285
|
2018-10-05T16:47:58.000Z
|
2022-03-31T00:58:39.000Z
|
google/ads/googleads/v8/services/types/domain_category_service.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 425
|
2018-09-10T13:32:41.000Z
|
2022-03-31T14:50:05.000Z
|
google/ads/googleads/v8/services/types/domain_category_service.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 369
|
2018-11-28T07:01:00.000Z
|
2022-03-28T09:53:22.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetDomainCategoryRequest",},
)
class GetDomainCategoryRequest(proto.Message):
r"""Request message for
[DomainCategoryService.GetDomainCategory][google.ads.googleads.v8.services.DomainCategoryService.GetDomainCategory].
Attributes:
resource_name (str):
Required. Resource name of the domain
category to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
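# --- Hedged usage sketch (not part of the original module) ---
# proto-plus messages are constructed with keyword arguments; the resource
# name below is a made-up illustration of a "customers/.../domainCategories/..."
# style value, not one taken from this repository.
#
#     request = GetDomainCategoryRequest(
#         resource_name="customers/1234567890/domainCategories/111~222~333",
#     )
#     print(request.resource_name)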
| 30.825
| 120
| 0.729927
|
d875fb1e44159bdb390d7f6afbee936009699c5d
| 994
|
py
|
Python
|
scrapy/core/downloader/handlers/http10.py
|
HyunTruth/scrapy
|
9bc5fab870aaee23905057002276fc0e1a48485f
|
[
"BSD-3-Clause"
] | 9,953
|
2019-04-03T23:41:04.000Z
|
2022-03-31T11:54:44.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/scrapy/core/downloader/handlers/http10.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 48
|
2018-11-08T01:31:33.000Z
|
2019-03-08T01:18:18.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/scrapy/core/downloader/handlers/http10.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 2,803
|
2019-04-06T13:15:33.000Z
|
2022-03-31T07:42:01.000Z
|
"""Download handlers for http and https schemes
"""
from twisted.internet import reactor
from scrapy.utils.misc import load_object
from scrapy.utils.python import to_unicode
class HTTP10DownloadHandler(object):
lazy = False
def __init__(self, settings):
self.HTTPClientFactory = load_object(settings['DOWNLOADER_HTTPCLIENTFACTORY'])
self.ClientContextFactory = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
def download_request(self, request, spider):
"""Return a deferred for the HTTP download"""
factory = self.HTTPClientFactory(request)
self._connect(factory)
return factory.deferred
def _connect(self, factory):
host, port = to_unicode(factory.host), factory.port
if factory.scheme == b'https':
return reactor.connectSSL(host, port, factory,
self.ClientContextFactory())
else:
return reactor.connectTCP(host, port, factory)
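# --- Hedged usage note (not part of the original module) ---
# Scrapy selects download handlers per URI scheme through the DOWNLOAD_HANDLERS
# setting; a typical (illustrative) project setting pointing at this handler is:
#   DOWNLOAD_HANDLERS = {
#       "http": "scrapy.core.downloader.handlers.http10.HTTP10DownloadHandler",
#   }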
| 35.5
| 92
| 0.684105
|
c6a034883c2413d27da776b5b5792279424c5e98
| 2,206
|
py
|
Python
|
PhdThesis/FeasibilityStudies/main.py
|
pariterre/ViolinOptimalControl
|
b7037d051a73f2c6cf5815e9d2269ea8c2e11993
|
[
"MIT"
] | null | null | null |
PhdThesis/FeasibilityStudies/main.py
|
pariterre/ViolinOptimalControl
|
b7037d051a73f2c6cf5815e9d2269ea8c2e11993
|
[
"MIT"
] | 1
|
2020-04-16T02:21:49.000Z
|
2020-04-16T02:21:49.000Z
|
PhdThesis/FeasibilityStudies/main.py
|
pariterre/ViolinOptimalControl
|
b7037d051a73f2c6cf5815e9d2269ea8c2e11993
|
[
"MIT"
] | 1
|
2019-11-18T16:31:16.000Z
|
2019-11-18T16:31:16.000Z
|
from enum import Enum
from feasibility_studies import FatigueIntegrator, StudyConfiguration, FatigueModels, TargetFunctions, FatigueParameters
class Study(Enum):
# DEBUG OPTIONS
XIA_ONLY = StudyConfiguration(
fatigue_parameters=FatigueParameters(),
t_end=600,
fixed_target=0.2,
target_function=TargetFunctions.TARGET_UP_TO_END,
n_points=100000,
x0=((0, 0.8, 0, 0),),
fatigue_models=(FatigueModels.XIA,),
)
XIA_STABILIZED_ONLY = StudyConfiguration(
fatigue_parameters=FatigueParameters(),
t_end=10,
fixed_target=0.2,
target_function=TargetFunctions.TARGET_UP_TO_END,
n_points=100000,
fatigue_models=(FatigueModels.XIA_STABILIZED,),
x0=((0, 0.5, 0, 0),),
plot_options=({"linestyle": "-"},),
)
# Actual studies from the thesis
STUDY1_XIA_LONG = StudyConfiguration(
fatigue_parameters=FatigueParameters(stabilization_factor=100),
t_end=3600,
fixed_target=1,
target_function=TargetFunctions.TARGET_RANDOM_PER_10SECONDS,
n_points=100000,
fatigue_models=(FatigueModels.XIA, FatigueModels.XIA_STABILIZED),
x0=((0, 1, 0), (0, 1, 0),),
rms_indices=((0, 1, 2), (0, 1, 2)),
plot_options=({"linestyle": "-"}, {"linestyle": "--"}),
)
STUDY1_XIA_VS_STABILIZED = StudyConfiguration(
fatigue_parameters=FatigueParameters(stabilization_factor=100),
t_end=0.1,
fixed_target=0.8,
target_function=TargetFunctions.TARGET_UP_TO_END,
n_points=1000,
fatigue_models=(FatigueModels.XIA, FatigueModels.XIA_STABILIZED),
x0=((0, 0.6, 0), (0, 0.6, 0)),
plot_options=({"linestyle": "-"}, {"linestyle": "--"}),
)
def main():
# Define the study to perform
study = Study.STUDY1_XIA_LONG
# Prepare and run the integrator
runner = FatigueIntegrator(study.value)
runner.perform()
# Print some results
runner.print_final_sum()
runner.print_integration_time()
if len(runner.study.fatigue_models.models) == 2:
runner.print_rmse()
runner.plot_results()
if __name__ == "__main__":
main()
| 30.219178
| 120
| 0.651859
|
5e1334067b305d7c172ea8a958ff8dd274c2e443
| 3,433
|
py
|
Python
|
test/update_candidate.py
|
jeromevonk/candidates-api
|
256dc13a1ca73d88ae97b80e010774dc5e97c4ca
|
[
"MIT"
] | 1
|
2018-07-08T02:52:25.000Z
|
2018-07-08T02:52:25.000Z
|
test/update_candidate.py
|
jeromevonk/candidates-api
|
256dc13a1ca73d88ae97b80e010774dc5e97c4ca
|
[
"MIT"
] | null | null | null |
test/update_candidate.py
|
jeromevonk/candidates-api
|
256dc13a1ca73d88ae97b80e010774dc5e97c4ca
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------
# Name: Candidates API test
# Author: Jerome Vergueiro Vonk
# Created: 01/06/2018
#-------------------------------------------------------------------------------
import requests
import sys
import json
import ast
from get_all_candidates import getAllCandidates
#-------------------------------------------------------------------------------
# Helper function
#-------------------------------------------------------------------------------
def updateCandidate(candidate):
try:
r = requests.put(url, json = candidate)
print(r.status_code)
if r.status_code not in range(200, 300):
print(r.text)
except requests.exceptions.RequestException as e:
print(e)
#-------------------------------------------------------------------------------
# Hosted locally or in heroku
#-------------------------------------------------------------------------------
LOCAL = 'http://localhost:5000/candidates/api/v2.0/'
HEROKU = 'https://candidates-api.herokuapp.com/candidates/api/v2.0/'
AWS = 'http://candidates-api.sa-east-1.elasticbeanstalk.com/candidates/api/v2.0/'
# Default to localhost
URL_BASE = LOCAL
# Parse command line argument
if len(sys.argv) > 1:
if 'heroku' == sys.argv[1]:
URL_BASE = HEROKU
if 'aws' == sys.argv[1]:
URL_BASE = AWS
#-------------------------------------------------------------------------------
# Find the candidate ID
#-------------------------------------------------------------------------------
# Perform the query
response = getAllCandidates()
# Convert to a list of dictionaries
jdata = json.loads(response)
# Hold information of the ID
id_to_update = 0
# Find id of candidate 'Jerome Vonk'
for item in jdata['candidates']:
if item['name'] == 'Jerome Vonk':
id_to_update = item['id']
print("Found Jerome Vonk with id = {}".format(id_to_update) )
if id_to_update == 0:
print("Did not find Jerome Vonk on database")
sys.exit()
#-------------------------------------------------------------------------------
# Update a candidate
#-------------------------------------------------------------------------------
url = URL_BASE + 'candidates/{}'.format(id_to_update)
candidate = { "name" : "Jerome Vergueiro Vonk", "picture" : "", "birthdate" : "18/02/1988", "gender" : 1,
"email" : "vonk@gmail.com", "phone" : "11912345678", "address" : "Avenida Paulista, 1",
"longitude": 0, "latitude": 0, "tags" : ["mecathronics", "dutch/brazilian",], "experience" : [], "education" : []}
# Education
graduation = {"institution" : "USP", "degree" : "Engineering", "date_start" : "01/01/2007", "date_end" : "31/12/2011", "description" : "Mechatronics Engineering is a field between mechanics and elethronics"}
candidate['education'].append(graduation)
# Experience
diebold = {"company" : "Diebold", "job_title" : "Engineer", "date_start" : "01/01/2007", "date_end" : "31/12/2011", "description" : "Mechatronics Engineering is a field between mechanics and elethronics"}
ea = {"company" : "EA", "job_title" : "Tester", "date_start" : "15/06/2017", "date_end" : "28/09/2018", "description" : "Localization tester for brazilian portuguese"}
candidate['experience'].append(diebold)
candidate['experience'].append(ea)
print("Updating candidate...")
updateCandidate(candidate)
| 39.918605
| 207
| 0.511215
|
bf0b75263ff7930817f047e73532f17fd7b44fc6
| 15,155
|
py
|
Python
|
SMBcorr/mar_smb_cumulative.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | null | null | null |
SMBcorr/mar_smb_cumulative.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | null | null | null |
SMBcorr/mar_smb_cumulative.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | 1
|
2020-08-06T19:48:52.000Z
|
2020-08-06T19:48:52.000Z
|
#!/usr/bin/env python
u"""
mar_smb_cumulative.py
Written by Tyler Sutterley (11/2019)
Calculates cumulative anomalies of MAR surface mass balance products
COMMAND LINE OPTIONS:
--help: list the command line options
--directory=X: set the full path to the MAR data directory
--version=X: MAR version to run
v3.5.2
v3.9
v3.10
v3.11
--downscaled: run downscaled MAR
--product: MAR product to calculate
SMB: Surface Mass Balance
PRECIP: Precipitation
SNOWFALL: Snowfall
RAINFALL: Rainfall
RUNOFF: Melt Water Runoff
SNOWMELT: Snowmelt
REFREEZE: Melt Water Refreeze
SUBLIM = Sublimation
--mean: Start and end year of mean (separated by commas)
-M X, --mode=X: Permission mode of directories and files created
-V, --verbose: Verbose output of netCDF4 variables
PROGRAM DEPENDENCIES:
convert_calendar_decimal.py: converts from calendar dates to decimal years
UPDATE HISTORY:
Written 11/2019
"""
from __future__ import print_function
import sys
import os
import re
import getopt
import pyproj
import netCDF4
import builtins
import traceback
import numpy as np
from SMBcorr.convert_calendar_decimal import convert_calendar_decimal
#-- data product longnames
longname = {}
longname['SMB'] = 'Surface_Mass_Balance'
longname['PRECIP'] = 'Precipitation'
longname['SNOWFALL'] = 'Snowfall'
longname['RAINFALL'] = 'Rainfall'
longname['RUNOFF'] = 'Melt_Water_Runoff'
longname['SNOWMELT'] = 'Snowmelt'
longname['REFREEZE'] = 'Melt_Water_Refreeze'
longname['SUBLIM'] = 'Sublimation'
#-- PURPOSE: sort input files by year
def sort_files(regex, input_files):
sort_indices = np.argsort([regex.match(f).group(2) for f in input_files])
return np.array(input_files)[sort_indices]
#-- PURPOSE: get the dimensions for the input data matrices
def get_dimensions(directory,input_files,XNAME,YNAME):
#-- get grid dimensions from first file and 12*number of files
#-- Open the NetCDF file for reading
fileID = netCDF4.Dataset(os.path.join(directory,input_files[0]), 'r')
nx, = fileID[XNAME].shape
ny, = fileID[YNAME].shape
fileID.close()
return ny,nx
#-- PURPOSE: create an output netCDF4 file for the output data fields
def create_netCDF4(OUTPUT, FILENAME=None, UNITS=None, LONGNAME=None,
VARNAME=None, LONNAME=None, LATNAME=None, XNAME=None, YNAME=None,
TIMENAME=None, MASKNAME=None, PROJECTION=None, TITLE=None, VERBOSE=False):
#-- output netCDF4 file
fileID = netCDF4.Dataset(FILENAME,'w',format="NETCDF4")
nc = {}
#-- Defining the netCDF dimensions
#-- create each netCDF4 dimension variable
for key in (XNAME,YNAME):
fileID.createDimension(key, len(OUTPUT[key]))
nc[key] = fileID.createVariable(key, 'f', (key,), zlib=True)
fileID.createDimension(TIMENAME, 1)
nc[TIMENAME] = fileID.createVariable(TIMENAME, 'f', (TIMENAME,), zlib=True)
#-- create each netCDF4 variable
for key,type in zip([LONNAME,LATNAME,MASKNAME],['f','f','b']):
nc[key] = fileID.createVariable(key, type, ('y','x',), zlib=True)
nc[VARNAME] = fileID.createVariable(VARNAME, 'f', ('y','x',),
fill_value=OUTPUT[VARNAME].fill_value, zlib=True)
#-- fill each output netCDF4 variable
for key in (XNAME,YNAME,TIMENAME,LONNAME,LATNAME,MASKNAME,VARNAME):
nc[key][:] = OUTPUT[key]
#-- Defining attributes for each netCDF4 variable
nc[XNAME].units = 'm'
nc[YNAME].units = 'm'
nc[TIMENAME].units = 'years'
nc[TIMENAME].long_name = 'Date_in_Decimal_Years'
nc[LONNAME].long_name = 'longitude'
nc[LONNAME].units = 'degrees_east'
nc[LATNAME].long_name = 'latitude'
nc[LATNAME].units = 'degrees_north'
nc[VARNAME].long_name = LONGNAME
nc[VARNAME].units = UNITS
#-- global variables of netCDF file
fileID.projection = PROJECTION
fileID.TITLE = TITLE
#-- Output NetCDF structure information
if VERBOSE:
print(FILENAME)
print(list(fileID.variables.keys()))
#-- Closing the netCDF file
fileID.close()
#-- PURPOSE: calculates cumulative anomalies in MAR products
def mar_smb_cumulative(input_dir, VERSION, PRODUCT, RANGE=[1961,1990],
DOWNSCALED=False, VERBOSE=False, MODE=0o775):
#-- regular expression pattern for MAR dataset
rx = re.compile('MAR{0}-monthly-(.*?)-(\d+).nc$'.format(VERSION))
#-- netCDF4 variable names (for both direct and derived products)
input_products = {}
#-- SMB from downscaled product
if DOWNSCALED:
#-- variable coordinates
XNAME,YNAME,TIMENAME = ('x','y','time')
#-- SMBcorr is topography corrected SMB for the ice covered area
#-- SMB2 is the SMB for the tundra covered area
input_products['SMB'] = ['SMBcorr','SMB2']
#-- RU from downscaled product
#-- RUcorr is topography corrected runoff for the ice covered area
#-- RU2corr is topography corrected runoff for the tundra covered area
input_products['RUNOFF'] = ['RUcorr','RU2corr']
input_products['PRECIP'] = ['RF','SF']
input_products['SNOWFALL'] = 'SF'
#-- ME from downscaled product
#-- MEcorr is topography corrected melt
input_products['SNOWMELT'] = 'MEcorr'
input_products['SUBLIM'] = 'SU'
input_products['REFREEZE'] = ['MEcorr','RUcorr','RU2corr']
input_products['RAINFALL'] = 'RF'
#-- downscaled projection: WGS84/NSIDC Sea Ice Polar Stereographic North
proj4_params = "+init=EPSG:{0:d}".format(3413)
else:
#-- variable coordinates
XNAME,YNAME,TIMENAME = ('X10_105','Y21_199','TIME')
#-- SMB is SMB for the ice covered area
input_products['SMB'] = 'SMB'
#-- RU is runoff for the ice covered area
#-- RU2 is runoff for the tundra covered area
input_products['RUNOFF'] = ['RU','RU2']
input_products['PRECIP'] = ['RF','SF']
input_products['SNOWFALL'] = 'SF'
input_products['SNOWMELT'] = 'ME'
input_products['SUBLIM'] = 'SU'
input_products['REFREEZE'] = 'RZ'
input_products['RAINFALL'] = 'RF'
#-- MAR model projection: Polar Stereographic (Oblique)
#-- Earth Radius: 6371229 m
#-- True Latitude: 0
#-- Center Longitude: -40
#-- Center Latitude: 70.5
proj4_params = ("+proj=sterea +lat_0=+70.5 +lat_ts=0 +lon_0=-40.0 "
"+a=6371229 +no_defs")
#-- create flag to differentiate between direct and directed products
if (np.ndim(input_products[PRODUCT]) == 0):
#-- direct products
derived_product = False
else:
#-- derived products
derived_product = True
#-- Open the NetCDF4 file for reading
mean_filename = 'MAR_{0}_{1}_mean_{2:4.0f}-{3:4.0f}.nc'
MEAN_FILE = mean_filename.format(VERSION,PRODUCT,RANGE[0],RANGE[1])
with netCDF4.Dataset(os.path.join(input_dir,MEAN_FILE), 'r') as fileID:
MEAN = fileID.variables[PRODUCT][:,:].copy()
#-- output subdirectory
output_sub = 'MAR_{0}_{1}_cumul'
output_dir = os.path.join(input_dir,output_sub.format(VERSION,PRODUCT))
os.makedirs(output_dir,MODE) if not os.access(output_dir,os.F_OK) else None
#-- output netCDF4 title format
TITLE = 'Cumulative_anomalies_relative_to_{0:4d}-{1:4d}_Mean'
#-- find input files
input_files=sort_files(rx,[f for f in os.listdir(input_dir) if rx.match(f)])
#-- input dimensions and counter variable
#-- get dimensions for input dataset
ny,nx = get_dimensions(input_dir,input_files,XNAME,YNAME)
#-- allocate for all data
CUMUL = {}
CUMUL['LON'] = np.zeros((ny,nx))
CUMUL['LAT'] = np.zeros((ny,nx))
CUMUL['VALID'] = np.zeros((ny,nx),dtype=np.bool)
CUMUL['x'] = np.zeros((nx))
CUMUL['y'] = np.zeros((ny))
#-- calculate cumulative anomalies
CUMUL[PRODUCT] = np.ma.zeros((ny,nx),fill_value=-9999.0)
CUMUL[PRODUCT].mask = np.ones((ny,nx),dtype=np.bool)
#-- input monthly data
MONTH = {}
MONTH['MASK'] = np.zeros((ny,nx))
#-- for each file
for t,input_file in enumerate(input_files):
#-- Open the NetCDF file for reading
fileID = netCDF4.Dataset(os.path.join(input_dir,input_file), 'r')
#-- Getting the data from each netCDF variable
#-- latitude and longitude
CUMUL['LON'][:,:] = fileID.variables['LON'][:,:].copy()
CUMUL['LAT'][:,:] = fileID.variables['LAT'][:,:].copy()
#-- extract model x and y
CUMUL['x'][:] = fileID.variables[XNAME][:].copy()
CUMUL['y'][:] = fileID.variables[YNAME][:].copy()
#-- get reanalysis and year from file
reanalysis,year = rx.findall(input_file).pop()
#-- convert from months since year start to calendar month
months = fileID.variables[TIMENAME][:].copy() + 1.0
#-- read land/ice mask
LAND_MASK = fileID.variables['MSK'][:,:].copy()
#-- finding valid points only from land mask
iy,ix = np.nonzero(LAND_MASK > 1)
CUMUL['VALID'][iy,ix] = True
CUMUL[PRODUCT].mask[iy,ix] = False
#-- read downscaled masks
if DOWNSCALED:
#-- read glacier and ice sheet mask (tundra=1, permanent ice=2)
MASK_MAR = fileID.variables['MSK_MAR'][:,:].copy()
SURF_MAR = fileID.variables['SRF_MAR'][:,:].copy()
iy,ix = np.nonzero((SURF_MAR >= 0.0) & (LAND_MASK > 1))
MONTH['MASK'][iy,ix] = MASK_MAR[iy,ix]
else:
MONTH['MASK'][iy,ix] = 2.0
#-- invalid value from MAR product
FILL_VALUE = fileID.variables['SMB']._FillValue
#-- for each month
for m,mon in enumerate(months):
#-- calculate time in decimal format (m+1 to convert from indice)
#-- convert to decimal format (uses matrix algebra for speed)
CUMUL['TIME'] = convert_calendar_decimal(np.float(year),mon)
#-- read each product of interest contained within the dataset
#-- read variables for both direct and derived products
if derived_product:
for p in input_products[PRODUCT]:
MONTH[p] = fileID.variables[p][m,:,:].copy()
else:
p = input_products[PRODUCT]
MONTH[PRODUCT] = fileID.variables[p][m,:,:].copy()
#-- calculate derived products
if (PRODUCT == 'PRECIP'):
#-- PRECIP = SNOWFALL + RAINFALL
MONTH['PRECIP'] = MONTH['SF'] + MONTH['RF']
elif (PRODUCT == 'REFREEZE') and DOWNSCALED:
#-- runoff from permanent ice covered regions and tundra regions
RU1,RU2 = input_products['RUNOFF']
ME = input_products['SNOWMELT']
MONTH['RUNOFF'] = (MONTH['MASK'] - 1.0)*MONTH[RU1] + \
(2.0 - MONTH['MASK'])*MONTH[RU2]
#-- REFREEZE = (total) SNOWMELT - RUNOFF
MONTH['REFREEZE'] = MONTH[ME] - MONTH['RUNOFF']
elif (PRODUCT == 'RUNOFF'):
#-- runoff from permanent ice covered regions and tundra regions
RU1,RU2 = input_products['RUNOFF']
MONTH['RUNOFF'] = (MONTH['MASK'] - 1.0)*MONTH[RU1] + \
(2.0 - MONTH['MASK'])*MONTH[RU2]
elif (PRODUCT == 'SMB'):
#-- SMB from permanent ice covered regions and tundra regions
SMB1,SMB2 = input_products['SMB']
MONTH['SMB'] = (MONTH['MASK'] - 1.0)*MONTH[SMB1] + \
(2.0 - MONTH['MASK'])*MONTH[SMB2]
#-- calculate cumulative for each time step
CUMUL[PRODUCT].data[iy,ix] += MONTH[PRODUCT][iy,ix] - MEAN[iy,ix]
#-- replace masked values with fill value
CUMUL[PRODUCT].data[CUMUL[PRODUCT].mask] = CUMUL[PRODUCT].fill_value
#-- output netCDF4 filename
args = (VERSION, PRODUCT, year, mon)
cumul_file = 'MAR_{0}_{1}_cumul_{2}_{3:02.0f}.nc'.format(*args)
create_netCDF4(CUMUL, FILENAME=os.path.join(output_dir,cumul_file),
UNITS='mmWE', LONGNAME=longname[PRODUCT], VARNAME=PRODUCT,
LONNAME='LON', LATNAME='LAT', XNAME='x', YNAME='y',
TIMENAME='TIME', MASKNAME='VALID', VERBOSE=VERBOSE,
PROJECTION=proj4_params, TITLE=TITLE.format(RANGE[0],RANGE[1]))
#-- change the permissions mode
os.chmod(os.path.join(output_dir,cumul_file),MODE)
#-- close the netcdf file
fileID.close()
#-- PURPOSE: help module to describe the optional input parameters
def usage():
print('\nHelp: {0}'.format(os.path.basename(sys.argv[0])))
print(' -D X, --directory=X\tSet the base data directory')
print(' --version=X\t\tMAR version to run')
print('\tv3.5.2\n\tv3.9\n\tv3.10\n\tv3.11')
print(' --downscaled\t\tRun downscaled MAR')
print(' --product:\t\tMAR product to calculate')
print('\tSMB: Surface Mass Balance')
print('\tPRECIP: Precipitation')
print('\tRUNOFF: Melt Water Runoff')
print('\tSNOWMELT: Snowmelt')
print('\tREFREEZE: Melt Water Refreeze')
print(' --mean:\t\tStart and end year of mean (separated by commas)')
print(' -M X, --mode=X\t\tPermission mode of directories and files created')
print(' -V, --verbose\t\tVerbose output of netCDF4 variables\n')
#-- This is the main part of the program that calls the individual modules
def main():
#-- Read the system arguments listed after the program and run the analyses
#-- with the specific parameters.
    long_options = ['help','directory=','version=','downscaled','product=',
'mean=','verbose','mode=']
optlist,arglist = getopt.getopt(sys.argv[1:],'hD:VM:',long_options)
#-- command line parameters
input_dir = os.getcwd()
#-- MAR model version
VERSION = 'v3.11'
DOWNSCALED = False
#-- Products to calculate cumulative
PRODUCTS = ['SMB']
#-- mean range
RANGE = [1961,1990]
VERBOSE = False
MODE = 0o775
for opt, arg in optlist:
if opt in ('-h','--help'):
usage()
sys.exit()
elif opt in ("-D","--directory"):
input_dir = os.path.expanduser(arg)
elif opt in ("--version"):
VERSION = arg
elif opt in ("--downscaled"):
DOWNSCALED = True
elif opt in ("--product"):
PRODUCTS = arg.split(',')
elif opt in ("--mean"):
RANGE = np.array(arg.split(','),dtype=np.int)
elif opt in ("-V","--verbose"):
VERBOSE = True
elif opt in ("-M","--mode"):
MODE = int(arg,8)
#-- for each product
for p in PRODUCTS:
#-- check that product was entered correctly
if p not in longname.keys():
raise IOError('{0} not in valid MAR products'.format(p))
#-- run cumulative program with parameters
mar_smb_cumulative(input_dir, VERSION, p, RANGE=RANGE,
DOWNSCALED=DOWNSCALED, VERBOSE=VERBOSE, MODE=MODE)
#-- run main program
if __name__ == '__main__':
main()
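# --- Hedged example invocation (not part of the original script) ---
# The data directory, product list and permission mode below are illustrative
# assumptions only; see the module docstring and usage() for the full options.
#   python mar_smb_cumulative.py --directory=/data/MAR --version=v3.11 \
#       --product=SMB,RUNOFF --mean=1961,1990 --verbose --mode=775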
| 41.634615
| 80
| 0.617156
|
68a79eb9967b3c1e8c6908d2ec17332aa88cd12d
| 7,218
|
py
|
Python
|
pywire/test_suite.py
|
Verkhovskaya/PyDL
|
4c3f2d952dd988ff27bf359d2f2cdde65737e062
|
[
"MIT"
] | 5
|
2018-07-28T18:18:59.000Z
|
2022-01-05T19:01:50.000Z
|
pywire/test_suite.py
|
Verkhovskaya/PyDL
|
4c3f2d952dd988ff27bf359d2f2cdde65737e062
|
[
"MIT"
] | null | null | null |
pywire/test_suite.py
|
Verkhovskaya/PyDL
|
4c3f2d952dd988ff27bf359d2f2cdde65737e062
|
[
"MIT"
] | null | null | null |
from pywire.signal import Signal
from tkinter import *
from tkinter.ttk import Separator
from enum import Enum
class BitState(Enum):
TRUE = 1
FALSE = 2
TRUE_FORCED = 3
FALSE_FORCED = 4
UNDEFINED = 5
def bitsToInt(bit_array):
for bit in bit_array:
if bit.state == BitState.UNDEFINED:
return None
total = 0
for index in range(len(bit_array)):
bit = bit_array[index]
total *= 2
if bit.state == BitState.TRUE_FORCED or bit.state == BitState.TRUE:
total += 1
return total
class Bit:
def press(self):
print("PRESSED")
if self.state == BitState.UNDEFINED:
self.state = BitState.TRUE_FORCED
elif self.state == BitState.TRUE_FORCED:
self.state = BitState.FALSE_FORCED
elif self.state == BitState.FALSE_FORCED:
self.state = BitState.UNDEFINED
elif self.state == BitState.TRUE:
self.state = BitState.TRUE_FORCED
elif self.state == BitState.FALSE:
self.state = BitState.TRUE_FORCED
else:
raise Exception("WTF")
self.update_gui()
def update_gui(self):
if self.state == BitState.UNDEFINED:
self.entity.configure(text="UN")
elif self.state == BitState.TRUE_FORCED:
self.entity.configure(text="TF")
elif self.state == BitState.FALSE_FORCED:
self.entity.configure(text="FF")
elif self.state == BitState.TRUE:
self.entity.configure(text="T_")
elif self.state == BitState.FALSE:
self.entity.configure(text="F_")
else:
raise Exception("WTF: " + str(self.state))
def __init__(self, master, row, column):
self.entity = Button(master,
command=self.press)
self.entity.grid(row=row, column=column)
self.state = BitState.FALSE
self.update_gui()
def refresh():
globals()["app"].recalculate_states()
class Application(Frame):
def draw_signals(self, master, signals, start_row):
for signal in signals:
self.bits[signal.name] = [[None for bit_index in range(len(signal))] for t in range(self.time)]
print("LABEL")
Label(master, text=signal.name).grid(row=start_row, column=1)
for bit_index in range(len(signal)):
Label(master, text="<" + str(bit_index) + ">").grid(row=start_row, column=2)
for time_stamp in range(self.time):
self.bits[signal.name][time_stamp][bit_index] = Bit(master, start_row, time_stamp + 3)
Separator(master, orient="horizontal").grid(row=start_row, column=time_stamp + 3, sticky=S + W + E)
start_row += 1
start_row += 1
print("done")
def createLayout(self, master):
Button(master, text="Refresh", command=refresh).grid(row=0, column=0)
for x in range(self.time):
Label(master, text="t=" + str(x)).grid(row=1, column=x+3)
row = 2
if self.input_signals:
Label(master, text="inputs").grid(row=row, column=0)
self.draw_signals(master, self.input_signals, row)
row += sum([len(signal) for signal in self.input_signals])+3
Label(master, text=" ").grid(row=row-1, column=0)
if self.other_signals:
Label(master, text="other").grid(row=row, column=0)
self.draw_signals(master, self.other_signals, row)
row += sum([len(signal) for signal in self.other_signals]) + 3
Label(master, text=" ").grid(row=row-1, column=0)
if self.output_signals:
Label(master, text="outputs").grid(row=row, column=0)
self.draw_signals(master, self.output_signals, row)
row += sum([len(signal) for signal in self.output_signals]) + 3
Label(master, text=" ").grid(row=row-1, column=0)
def recalculate_states(self):
for time_stamp in range(0, self.time):
for signal in Signal.all_signals:
if signal.driving_signals:
input_states = []
for input_signal in signal.driving_signals:
if signal.clock:
input_bits = self.bits[input_signal.name][time_stamp-1]
else:
input_bits = self.bits[input_signal.name][time_stamp]
input_states.append(bitsToInt(input_bits))
output_val = signal.driving_function(*input_states)
if isinstance(output_val, int):
output_string = bin(output_val)[2:].rjust(len(signal), "0")
output_string = output_string[len(output_string)-len(signal):]
print(output_string)
output_bool_array = [letter == "1" for letter in output_string]
print(output_bool_array)
signal_bits = self.bits[signal.name][time_stamp]
for index in range(len(output_bool_array)):
if signal_bits[index].state == BitState.TRUE_FORCED:
pass
elif signal_bits[index].state == BitState.FALSE_FORCED:
pass
elif output_bool_array[index]:
signal_bits[index].state = BitState.TRUE
else:
signal_bits[index].state = BitState.FALSE
                    elif isinstance(output_val, bool):
                        signal_bits = self.bits[signal.name][time_stamp]
                        for index in range(len(signal_bits)):
                            if output_val:
                                signal_bits[index].state = BitState.TRUE
                            else:
                                signal_bits[index].state = BitState.FALSE
else:
raise Exception("Function output is not a boolean or int")
for signal in Signal.all_signals:
for bit_row in self.bits[signal.name]:
for bit in bit_row:
bit.update_gui()
def __init__(self, master=None):
self.bits = {}
self.time = 10
signals = Signal.all_signals
self.input_signals = list(filter(lambda x: x.io == "in", signals))
self.output_signals = list(filter(lambda x: x.io == "out", signals))
self.other_signals = list(filter(lambda x: not x.io, signals))
Frame.__init__(self, master)
print("Creating layout")
self.createLayout(master)
print("DONE")
self.recalculate_states()
def launch_test():
root = Tk()
app = Application(master=root)
globals()["app"] = app
app.mainloop()
root.destroy()
"""
class TestObject:
def __init__(self, signals=Signal.all_signals, inputs={}, turns=10):
self.all_signals = signals
self.in_signals = list(filter(lambda x: x.io == "in", signals))
self.out_signals = list(filter(lambda x: x.io == "out", signals))
def print(self, turn, signals=self.signals):
"""
| 39.442623
| 119
| 0.557495
|
30f1afcbfbd6380acef898b0fff74262f61f2749
| 5,250
|
py
|
Python
|
stac_fastapi/elasticsearch/stac_fastapi/elasticsearch/extensions/filter.py
|
stac-utils/stac-fastapi-nosql
|
0f4cf9a91f9bfd78f215895b6b848efbb631db97
|
[
"MIT"
] | 6
|
2022-01-26T14:34:52.000Z
|
2022-02-22T00:09:23.000Z
|
stac_fastapi/elasticsearch/stac_fastapi/elasticsearch/extensions/filter.py
|
stac-utils/stac-fastapi-elasticsearch
|
0f4cf9a91f9bfd78f215895b6b848efbb631db97
|
[
"MIT"
] | 46
|
2022-02-22T21:22:17.000Z
|
2022-03-25T15:20:39.000Z
|
stac_fastapi/elasticsearch/stac_fastapi/elasticsearch/extensions/filter.py
|
stac-utils/stac-fastapi-nosql
|
0f4cf9a91f9bfd78f215895b6b848efbb631db97
|
[
"MIT"
] | null | null | null |
"""
Implements Filter Extension.
Basic CQL2 (AND, OR, NOT), comparison operators (=, <>, <, <=, >, >=), and IS NULL.
The comparison operators are allowed against string, numeric, boolean, date, and datetime types.
Basic Spatial Operators (http://www.opengis.net/spec/cql2/1.0/conf/basic-spatial-operators)
defines the intersects operator (S_INTERSECTS).
"""
from __future__ import annotations
import datetime
from enum import Enum
from typing import List, Union
from geojson_pydantic import (
GeometryCollection,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
from pydantic import BaseModel
queryables_mapping = {
"id": "id",
"collection": "collection",
"geometry": "geometry",
"datetime": "properties.datetime",
"created": "properties.created",
"updated": "properties.updated",
"cloud_cover": "properties.eo:cloud_cover",
"cloud_shadow_percentage": "properties.s2:cloud_shadow_percentage",
"nodata_pixel_percentage": "properties.s2:nodata_pixel_percentage",
}
class LogicalOp(str, Enum):
"""Logical operator.
CQL2 logical operators and, or, and not.
"""
_and = "and"
_or = "or"
_not = "not"
class ComparisonOp(str, Enum):
"""Comparison operator.
CQL2 comparison operators =, <>, <, <=, >, >=, and isNull.
"""
eq = "="
neq = "<>"
lt = "<"
lte = "<="
gt = ">"
gte = ">="
is_null = "isNull"
def to_es(self):
"""Generate an Elasticsearch term operator."""
if self == ComparisonOp.lt:
return "lt"
elif self == ComparisonOp.lte:
return "lte"
elif self == ComparisonOp.gt:
return "gt"
elif self == ComparisonOp.gte:
return "gte"
else:
raise RuntimeError(
f"Comparison op {self.value} does not have an Elasticsearch term operator equivalent."
)
class SpatialIntersectsOp(str, Enum):
"""Spatial intersections operator s_intersects."""
s_intersects = "s_intersects"
class PropertyReference(BaseModel):
"""Property reference."""
property: str
def to_es(self):
"""Produce a term value for this, possibly mapped by a queryable."""
return queryables_mapping.get(self.property, self.property)
class Timestamp(BaseModel):
"""Representation of an RFC 3339 datetime value object."""
timestamp: datetime.datetime
def to_es(self):
"""Produce an RFC 3339 datetime string."""
return self.timestamp.isoformat()
class Date(BaseModel):
"""Representation of an ISO 8601 date value object."""
date: datetime.date
def to_es(self):
"""Produce an ISO 8601 date string."""
return self.date.isoformat()
Arg = Union[
"Clause",
PropertyReference,
Timestamp,
Date,
Point,
MultiPoint,
LineString,
MultiLineString,
Polygon,
MultiPolygon,
GeometryCollection,
int,
float,
str,
bool,
]
class Clause(BaseModel):
"""Filter extension clause."""
op: Union[LogicalOp, ComparisonOp, SpatialIntersectsOp]
args: List[Arg]
def to_es(self):
"""Generate an Elasticsearch expression for this Clause."""
if self.op == LogicalOp._and:
return {"bool": {"filter": [to_es(arg) for arg in self.args]}}
elif self.op == LogicalOp._or:
return {"bool": {"should": [to_es(arg) for arg in self.args]}}
elif self.op == LogicalOp._not:
return {"bool": {"must_not": [to_es(arg) for arg in self.args]}}
elif self.op == ComparisonOp.eq:
return {"term": {to_es(self.args[0]): to_es(self.args[1])}}
elif self.op == ComparisonOp.neq:
return {
"bool": {
"must_not": [{"term": {to_es(self.args[0]): to_es(self.args[1])}}]
}
}
elif (
self.op == ComparisonOp.lt
or self.op == ComparisonOp.lte
or self.op == ComparisonOp.gt
or self.op == ComparisonOp.gte
):
return {
"range": {to_es(self.args[0]): {to_es(self.op): to_es(self.args[1])}}
}
elif self.op == ComparisonOp.is_null:
return {"bool": {"must_not": {"exists": {"field": to_es(self.args[0])}}}}
elif self.op == SpatialIntersectsOp.s_intersects:
return {
"geo_shape": {
to_es(self.args[0]): {
"shape": to_es(self.args[1]),
"relation": "intersects",
}
}
}
def to_es(arg: Arg):
"""Generate an Elasticsearch expression for this Arg."""
if (to_es_method := getattr(arg, "to_es", None)) and callable(to_es_method):
return to_es_method()
elif gi := getattr(arg, "__geo_interface__", None):
return gi
elif isinstance(arg, GeometryCollection):
return arg.dict()
elif (
isinstance(arg, int)
or isinstance(arg, float)
or isinstance(arg, str)
or isinstance(arg, bool)
):
return arg
else:
raise RuntimeError(f"unknown arg {repr(arg)}")
| 26.649746
| 102
| 0.583238
|
b8aa7e9a05156a3a00e3913f2d0c8fb0dd25d7d9
| 4,345
|
py
|
Python
|
runners/mlcommons_box_k8s/mlcommons_box_k8s/k8s_run.py
|
swiftdiaries/mlbox
|
75af4a06d09190fcef792017078ae80e00cd8d3d
|
[
"Apache-2.0"
] | null | null | null |
runners/mlcommons_box_k8s/mlcommons_box_k8s/k8s_run.py
|
swiftdiaries/mlbox
|
75af4a06d09190fcef792017078ae80e00cd8d3d
|
[
"Apache-2.0"
] | null | null | null |
runners/mlcommons_box_k8s/mlcommons_box_k8s/k8s_run.py
|
swiftdiaries/mlbox
|
75af4a06d09190fcef792017078ae80e00cd8d3d
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import List, Dict
from mlcommons_box.common import mlbox_metadata
from kubernetes import client, config
volume_mount_prefix = "/mnt/mlbox/"
class KubernetesRun(object):
logger: logging.Logger = None
mlbox_job_manifest: client.V1Job = None
namespace: str = "default"
def __init__(self, mlbox: mlbox_metadata.MLBox, loglevel: str):
"""Kubernetes Runner.
Args:
mlbox (mlbox_metadata.MLBox): MLBox specification. Reuses platform config from Docker.
"""
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%d-%b-%y-%H:%M:%S',
level=loglevel)
self.logger = logging.getLogger(__name__)
self.mlbox: mlbox_metadata.MLBox = mlbox
logging.info("MLBox instantiated!")
def binding_to_volumes(self, binding: Dict, args: List[str], volume_mounts: Dict, volumes: Dict):
for key, value in binding.items():
args.append("--" + key + "=" + volume_mount_prefix +
value['k8s']['pvc'] + "/" +
value['path'])
volume_mounts[
value['k8s']['pvc']] = client.V1VolumeMount(
name=value['k8s']['pvc'],
mount_path=volume_mount_prefix +
value['k8s']['pvc'],
)
volumes[value['k8s']['pvc']] = client.V1Volume(
name=value['k8s']['pvc'],
persistent_volume_claim=client.
V1PersistentVolumeClaimVolumeSource(
claim_name=value['k8s']['pvc']))
def create_job_manifest(self):
image: str = self.mlbox.platform.container.image
logging.info(f"Using image: {image}")
container_args: List[str] = []
container_volume_mounts: Dict = dict()
container_volumes: Dict = dict()
self.binding_to_volumes(self.mlbox.invoke.input_binding, container_args, container_volume_mounts, container_volumes)
self.binding_to_volumes(self.mlbox.invoke.output_binding, container_args, container_volume_mounts, container_volumes)
logging.info("Using Container arguments: %s" % container_args)
container = client.V1Container(name="mlcommons-box-container",
image=image,
args=container_args,
volume_mounts=list(
container_volume_mounts.values()))
pod_template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={
"app": "mlcommons-box",
"app-name": self.mlbox.name,
}),
spec=client.V1PodSpec(restart_policy="Never",
containers=[container],
volumes=list(container_volumes.values())))
job_spec = client.V1JobSpec(
template=pod_template,
backoff_limit=4,
)
self.mlbox_job_manifest = client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=client.V1ObjectMeta(generate_name="mlcommons-box-" +
self.mlbox.name + "-"),
spec=job_spec,
)
logging.info("The MLBox Kubernetes Job manifest %s" %
self.mlbox_job_manifest)
def create_job(self, k8s_job_client: client.BatchV1Api):
job_creation_response = k8s_job_client.create_namespaced_job(
body=self.mlbox_job_manifest,
namespace=self.namespace,
)
logging.info("MLCommons Box k8s job created. Status='%s'" %
str(job_creation_response.status))
def run(self):
"""Run a box"""
if self.mlbox.invoke.task_name != "kubernetes":
raise RuntimeError("Uh oh. \
Task file doesn't seem to be right, please use the correct kubernetes task file."
)
logging.info("Configuring MLBox as a Kubernetes Job...")
self.create_job_manifest()
config.load_kube_config()
batch_v1 = client.BatchV1Api()
# create job on k8s cluster
self.create_job(batch_v1)
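# --- Hedged usage sketch (not part of the original module) ---
# The runner is driven with an mlbox_metadata.MLBox instance whose invoke task
# is "kubernetes"; the construction below is an illustrative assumption.
#
#     runner = KubernetesRun(mlbox, loglevel="INFO")
#     runner.run()   # builds the V1Job manifest and submits it via BatchV1Api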
| 40.607477
| 125
| 0.561335
|
3014cee2dc2dc6bbf7d2d2412a2fa82d320d3fa8
| 6,161
|
py
|
Python
|
apps/sqoop/src/sqoop/test_base.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 3
|
2018-01-29T14:16:02.000Z
|
2019-02-05T21:33:05.000Z
|
apps/sqoop/src/sqoop/test_base.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 1
|
2019-05-06T15:32:21.000Z
|
2019-05-06T15:32:21.000Z
|
apps/sqoop/src/sqoop/test_base.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 2
|
2019-12-05T17:24:36.000Z
|
2021-11-22T21:21:32.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import logging
import os
import socket
import subprocess
import threading
import time
from django.conf import settings
from nose.plugins.skip import SkipTest
from desktop.lib.paths import get_run_root
from desktop.lib.rest.http_client import RestException
from hadoop import pseudo_hdfs4
from hadoop.pseudo_hdfs4 import is_live_cluster
from sqoop.client import SqoopClient
from sqoop.conf import SERVER_URL
service_lock = threading.Lock()
LOG = logging.getLogger(__name__)
class SqoopServerProvider(object):
"""
Setup a Sqoop server.
"""
TEST_PORT = '19080'
TEST_SHUTDOWN_PORT = '19081'
HOME = get_run_root('ext/sqoop/sqoop')
requires_hadoop = True
is_running = False
@classmethod
def setup_class(cls):
if not is_live_cluster():
raise SkipTest()
cls.cluster = pseudo_hdfs4.shared_cluster()
cls.client, callback = cls.get_shared_server()
cls.shutdown = [callback]
@classmethod
def initialize(cls, tmpdir):
hadoop_conf_dir = os.path.join(tmpdir, 'conf')
base_dir = os.path.join(tmpdir, 'sqoop')
log_dir = os.path.join(base_dir, 'logs')
conf_dir = os.path.join(base_dir, 'conf')
old_conf_dir = os.path.join(SqoopServerProvider.HOME, 'server/conf')
if not os.path.exists(hadoop_conf_dir):
os.mkdir(hadoop_conf_dir)
if not os.path.exists(base_dir):
os.mkdir(base_dir)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
if not os.path.exists(conf_dir):
os.mkdir(conf_dir)
for _file in ('sqoop.properties', 'sqoop_bootstrap.properties'):
with open(os.path.join(old_conf_dir, _file), 'r') as _original:
with open(os.path.join(conf_dir, _file), 'w') as _new:
for _line in _original:
line = _line.replace('${test.log.dir}', log_dir)
line = line.replace('${test.hadoop.conf.dir}', hadoop_conf_dir)
line = line.replace('${test.base.dir}', base_dir)
_new.write(line)
# This sets JAVA_OPTS with a sqoop conf... we need to use our own.
os.chmod(os.path.join(SqoopServerProvider.HOME, 'server/bin/setenv.sh'), 0)
@classmethod
def start(cls, cluster):
"""
    Start the Sqoop server process.
"""
SqoopServerProvider.initialize(cluster._tmpdir)
env = os.environ
env['CATALINA_HOME'] = os.path.join(SqoopServerProvider.HOME, 'server')
env['CATALINA_PID'] = os.path.join(cluster._tmpdir, 'sqoop/sqoop.pid')
env['CATALINA_OPTS'] = """
-Dtest.log.dir=%(log_dir)s
-Dtest.host.local=%(host)s
-Dsqoop.http.port=%(http_port)s
-Dsqoop.admin.port=%(admin_port)s
""" % {
'log_dir': os.path.join(cluster._tmpdir, 'sqoop/logs'),
'host': socket.getfqdn(),
'http_port': SqoopServerProvider.TEST_PORT,
'admin_port': SqoopServerProvider.TEST_SHUTDOWN_PORT
}
env['SQOOP_HTTP_PORT'] = SqoopServerProvider.TEST_PORT
env['SQOOP_ADMIN_PORT'] = SqoopServerProvider.TEST_SHUTDOWN_PORT
env['JAVA_OPTS'] = '-Dsqoop.config.dir=%s' % os.path.join(cluster._tmpdir, 'sqoop/conf')
args = [os.path.join(SqoopServerProvider.HOME, 'bin/sqoop.sh'), 'server', 'start']
LOG.info("Executing %s, env %s, cwd %s" % (repr(args), repr(env), cluster._tmpdir))
process = subprocess.Popen(args=args, env=env, cwd=cluster._tmpdir, stdin=subprocess.PIPE)
return process
@classmethod
def get_shared_server(cls, username='sqoop', language=settings.LANGUAGE_CODE):
callback = lambda: None
with service_lock:
if not SqoopServerProvider.is_running:
# Setup
cluster = pseudo_hdfs4.shared_cluster()
if is_live_cluster():
finish = ()
else:
LOG.info('\nStarting a Mini Sqoop. Requires "tools/jenkins/jenkins.sh" to be previously ran.\n')
finish = (
SERVER_URL.set_for_testing("http://%s:%s/sqoop" % (socket.getfqdn(), SqoopServerProvider.TEST_PORT)),
)
p = cls.start(cluster)
def kill():
with open(os.path.join(cluster._tmpdir, 'sqoop/sqoop.pid'), 'r') as pidfile:
pid = pidfile.read()
LOG.info("Killing Sqoop server (pid %s)." % pid)
os.kill(int(pid), 9)
p.wait()
atexit.register(kill)
start = time.time()
started = False
sleep = 0.01
client = SqoopClient(SERVER_URL.get(), username, language)
while not started and time.time() - start < 60.0:
LOG.info('Check Sqoop status...')
try:
version = client.get_version()
          except RestException as e:
LOG.exception('Exception fetching the Sqoop server version')
# Don't loop if we had an authentication error.
if e.code == 401:
raise
          except Exception as e:
LOG.info('Sqoop server not started yet: %s' % e)
else:
if version:
started = True
break
time.sleep(sleep)
sleep *= 2
if not started:
raise Exception("Sqoop server took too long to come up.")
def shutdown():
for f in finish:
f()
cluster.stop()
callback = shutdown
SqoopServerProvider.is_running = True
else:
client = SqoopClient(SERVER_URL.get(), username, language)
return client, callback
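# Illustrative sketch, not part of the original Hue test suite: the loop in
# get_shared_server retries client.get_version() with exponential backoff until
# the Sqoop server answers or a deadline passes. The helper below shows that
# pattern in isolation; the `ping` callable is a hypothetical stand-in.
def _wait_until_up(ping, deadline=60.0, initial_sleep=0.01):
  start, sleep = time.time(), initial_sleep
  while time.time() - start < deadline:
    try:
      if ping():  # any truthy response means the service is up
        return True
    except Exception:
      pass  # not up yet; back off and retry
    time.sleep(sleep)
    sleep *= 2  # double the wait between attempts
  raise Exception("Service took too long to come up.")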
| 32.426316
| 113
| 0.653141
|
a65238d7f4b5d62a069994fba073211190b72ed4
| 2,557
|
py
|
Python
|
python/sync/states.py
|
sa363/youtrack-rest-python-library
|
ed7921a4ca8aa882517a531af934836a0ef295ed
|
[
"Apache-2.0"
] | 108
|
2015-01-04T19:27:49.000Z
|
2021-11-08T09:50:08.000Z
|
python/sync/states.py
|
sa363/youtrack-rest-python-library
|
ed7921a4ca8aa882517a531af934836a0ef295ed
|
[
"Apache-2.0"
] | 12
|
2015-03-03T09:07:09.000Z
|
2020-07-01T05:26:30.000Z
|
python/sync/states.py
|
sa363/youtrack-rest-python-library
|
ed7921a4ca8aa882517a531af934836a0ef295ed
|
[
"Apache-2.0"
] | 109
|
2015-01-29T13:20:37.000Z
|
2022-03-19T09:03:28.000Z
|
import youtrack
fix = "fix"
opn = "open"
rop = "reopen"
inp = "in progress"
dis = "discuss"
crp = "can't reproduce"
obs = "obsolete"
dup = "duplicate"
asd = "as designed"
inv = "invalidate"
wat = "wait"
ver = "verify"
cvr = "can't verify"
advanced_state_machine = {
"Submitted -> Fixed" : fix,
"Submitted -> Open" : opn,
"Submitted -> In Progress" : inp,
"Submitted -> To be discussed" : dis,
"Submitted -> Can't Reproduce" : crp,
"Submitted -> Obsolete" :obs,
"Submitted -> Duplicate" :dup,
"Submitted -> As designed" : asd,
"Submitted -> Invalid" : inv,
"Open -> In Progress" : inp,
"Open -> To be discussed" : dis,
"Open -> Fixed" : fix,
"Open -> Obsolete" : obs,
"Open -> Duplicate" : dup,
"Open -> Can't Reproduce" : crp,
"Open -> As designed" : asd,
"Open -> Invalid" : inv,
"Open -> Wait for Reply" : wat,
"Reopened -> Open" : opn,
"Obsolete -> Open" : rop,
"Duplicate -> Open" : rop,
"In Progress -> Open" : rop,
"In Progress -> Fixed" : fix,
"In Progress -> Can't Reproduce" : crp,
"In Progress -> Obsolete" : obs,
"In Progress -> As designed" : asd,
"To be discussed -> In Progress" : inp,
"To be discussed -> Duplicate" : dup,
"To be discussed -> Obsolete" : obs,
"Can't Reproduce -> Open" : rop,
"As designed -> Open" : rop,
"Won't fix -> Open" : rop,
"Invalid -> Open" : rop,
"Incomplete -> Open" : rop,
"Fixed -> Open" : rop,
"Fixed -> Verified" : ver,
"Fixed -> W/O verification" : cvr,
"W/O verification -> Open" : ver,
"Verified -> Open" : rop,
"Wait for Reply -> Fixed" : fix,
"Wait for Reply -> Open" : opn,
"Wait for Reply -> In Progress" : inp,
"Wait for Reply -> To be discussed" : dis,
"Wait for Reply -> Obsolete" : obs,
"Wait for Reply -> Duplicate" : dup,
"Wait for Reply -> As designed" :asd,
"Wait for Reply -> Invalid" : inv
}
def get_event(field):
old = field.old_value[0] if len(field.old_value) == 1 else None
new = field.new_value[0] if len(field.new_value) == 1 else None
    if not old or not new: raise ValueError('State field must have exactly one old value and one new value')
event = advanced_state_machine.get(old + ' -> ' + new)
if not event: raise LookupError("failed to apply change: State:" + old + "->" + new + " - state machine doesn't allow this transition")
return event
def get_command_for_state_change(field, with_state_machine):
return "State " + (get_event(field) if with_state_machine else field.new_value[0]) + " "
| 33.644737
| 139
| 0.593664
|
3c61ee6c6e1287e85d122963d6dbb9687fe38e01
| 469
|
py
|
Python
|
edit_image_data/change_shutil.py
|
Hoku113/support-custom-vision
|
e6008ce959cff26b2150e6b2a95a7c3cb1460719
|
[
"BSD-3-Clause"
] | null | null | null |
edit_image_data/change_shutil.py
|
Hoku113/support-custom-vision
|
e6008ce959cff26b2150e6b2a95a7c3cb1460719
|
[
"BSD-3-Clause"
] | null | null | null |
edit_image_data/change_shutil.py
|
Hoku113/support-custom-vision
|
e6008ce959cff26b2150e6b2a95a7c3cb1460719
|
[
"BSD-3-Clause"
] | null | null | null |
import pathlib
import shutil
import os
def change_suffix(file_name, from_suffix, to_suffix):
sf = pathlib.PurePath(file_name).suffix
if sf == from_suffix:
st = pathlib.PurePath(file_name).stem
to_name = st + to_suffix
shutil.move(file_name, to_name)
path = './images/'
files = os.listdir(path)
first = [f for f in files if os.path.isfile(os.path.join(path, f))]
os.chdir(path)
for x in first:
change_suffix(x, '.svg', '.jpg')
| 20.391304
| 67
| 0.671642
|
be7f644fcca8edfabff23ed8b62d7a66470d1d34
| 1,433
|
py
|
Python
|
scripts/AddObject/addgrid.py
|
AsheAnn/C4D_Python
|
3fa5e1d8b4f94efb03f820a61789d276c8a5b045
|
[
"MIT"
] | null | null | null |
scripts/AddObject/addgrid.py
|
AsheAnn/C4D_Python
|
3fa5e1d8b4f94efb03f820a61789d276c8a5b045
|
[
"MIT"
] | null | null | null |
scripts/AddObject/addgrid.py
|
AsheAnn/C4D_Python
|
3fa5e1d8b4f94efb03f820a61789d276c8a5b045
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Note:
# - This is the Python code used in Script Manager.
# Compatible:
# - Win / Mac
# - R17, R18, R19, R20, R21
# --------------------------------------------------------
import c4d
# Main function
def main():
grid = c4d.BaseObject(c4d.Oplane) # Create Grid
grid.SetRelPos(c4d.Vector(0)) # Set position
grid[c4d.PRIM_PLANE_WIDTH] = 100
grid[c4d.PRIM_PLANE_HEIGHT] = 100
grid[c4d.PRIM_PLANE_SUBW] = 1
grid[c4d.PRIM_PLANE_SUBH] = 1
grid[c4d.PRIM_AXIS] = 2
doc.InsertObject(grid) # Insert Grid
doc.SetActiveObject(grid) # Select Grid
c4d.CallCommand(12236) # Make Editable
# Execute main()
if __name__=='__main__':
main()
| 49.413793
| 200
| 0.287509
|
8dc946874bf4f6b81cfd8afa79037b4a42cb0437
| 31,159
|
py
|
Python
|
tests/test_charts.py
|
stefsmeets/pyvista
|
06b1ac01214a4c636395f139b681acea2543960c
|
[
"MIT"
] | 1,107
|
2019-05-13T06:40:26.000Z
|
2022-03-31T22:16:32.000Z
|
tests/test_charts.py
|
stefsmeets/pyvista
|
06b1ac01214a4c636395f139b681acea2543960c
|
[
"MIT"
] | 1,709
|
2019-05-13T05:52:42.000Z
|
2022-03-31T18:16:53.000Z
|
tests/test_charts.py
|
stefsmeets/pyvista
|
06b1ac01214a4c636395f139b681acea2543960c
|
[
"MIT"
] | 225
|
2019-05-16T04:24:20.000Z
|
2022-03-31T18:14:02.000Z
|
"""Test charting functionality"""
import itertools
import platform
import matplotlib.pyplot as plt
import numpy as np
import pytest
import pyvista
from pyvista import examples
from pyvista.plotting import charts, system_supports_plotting
skip_mac = pytest.mark.skipif(platform.system() == 'Darwin',
reason='MacOS CI fails when downloading examples')
skip_no_plotting = pytest.mark.skipif(
not system_supports_plotting(),
reason="Test requires system to support plotting"
)
def vtk_array_to_tuple(arr):
return tuple(arr.GetValue(i) for i in range(arr.GetNumberOfValues()))
def to_vtk_scientific(val):
parts = val.split('e')
sign, exp = parts[1][0], parts[1][1:]
exp = exp.lstrip("0") # Remove leading zeros of exponent
return parts[0] + "e" + sign + exp if exp != "" else parts[0] # Remove exponent altogether if it is 0
@pytest.fixture
def pl():
p = pyvista.Plotter(window_size=(600, 600))
p.background_color = 'w'
return p
@pytest.fixture
def chart_2d():
return pyvista.Chart2D()
@pytest.fixture
def chart_box():
return pyvista.ChartBox([[1, 2, 3]])
@pytest.fixture
def chart_pie():
return pyvista.ChartPie([1, 2, 3])
@pytest.fixture
def chart_mpl():
f, ax = plt.subplots()
ax.plot([0, 1, 2], [3, 1, 2])
return pyvista.ChartMPL(f)
@pytest.fixture
def line_plot_2d(chart_2d):
return chart_2d.line([0, 1, 2], [3, 1, 2])
@pytest.fixture
def scatter_plot_2d(chart_2d):
return chart_2d.scatter([0, 1, 2], [3, 1, 2])
@pytest.fixture
def area_plot(chart_2d):
return chart_2d.area([0, 1, 2], [2, 1, 3], [0, 2, 0])
@pytest.fixture
def bar_plot(chart_2d):
return chart_2d.bar([0, 1, 2], [[2, 1, 3], [1, 2, 0]])
@pytest.fixture
def stack_plot(chart_2d):
return chart_2d.stack([0, 1, 2], [[2, 1, 3], [1, 2, 0]])
@pytest.fixture
def box_plot(chart_box):
return chart_box.plot
@pytest.fixture
def pie_plot(chart_pie):
return chart_pie.plot
def test_pen():
c_red, c_blue = (1, 0, 0, 1), (0, 0, 1, 1)
w_thin, w_thick = 2, 10
s_dash, s_dot, s_inv = "--", ":", "|"
assert s_inv not in charts.Pen.LINE_STYLES, "New line styles added? Change this test."
# Test constructor arguments
pen = charts.Pen(color=c_red, width=w_thin, style=s_dash)
assert np.allclose(pen.color, c_red)
assert np.isclose(pen.width, w_thin)
assert pen.style == s_dash
# Test properties
pen.color = c_blue
color = [0.0, 0.0, 0.0]
pen.GetColorF(color)
color.append(pen.GetOpacity() / 255)
assert np.allclose(pen.color, c_blue)
assert np.allclose(color, c_blue)
pen.width = w_thick
assert np.isclose(pen.width, w_thick)
assert np.isclose(pen.GetWidth(), w_thick)
pen.style = s_dot
assert pen.style == s_dot
assert pen.GetLineType() == charts.Pen.LINE_STYLES[s_dot]["id"]
with pytest.raises(ValueError):
pen.style = s_inv
def test_wrapping():
width = 5
# Test wrapping of VTK Pen object
vtkPen = pyvista._vtk.vtkPen()
wrappedPen = charts.Pen(_wrap=vtkPen)
assert wrappedPen.__this__ == vtkPen.__this__
assert wrappedPen.width == vtkPen.GetWidth()
wrappedPen.width = width
assert wrappedPen.width == vtkPen.GetWidth()
assert vtkPen.GetWidth() == width
@skip_mac
def test_brush():
c_red, c_blue = (1, 0, 0, 1), (0, 0, 1, 1)
t_masonry = examples.download_masonry_texture()
t_puppy = examples.download_puppy_texture()
# Test constructor arguments
brush = charts.Brush(color=c_red, texture=t_masonry)
assert np.allclose(brush.color, c_red)
assert np.allclose(brush.texture.to_array(), t_masonry.to_array())
# Test properties
brush.color = c_blue
color = [0.0, 0.0, 0.0, 0.0]
brush.GetColorF(color)
assert np.allclose(brush.color, c_blue)
assert np.allclose(color, c_blue)
brush.texture = t_puppy
t = pyvista.Texture(brush.GetTexture())
assert np.allclose(brush.texture.to_array(), t_puppy.to_array())
assert np.allclose(t.to_array(), t_puppy.to_array())
brush.texture_interpolate = False
assert not brush.texture_interpolate
NEAREST = 0x01
assert brush.GetTextureProperties() & NEAREST
brush.texture_repeat = True
assert brush.texture_repeat
REPEAT = 0x08
assert brush.GetTextureProperties() & REPEAT
@skip_no_plotting
def test_axis(chart_2d):
l = "Y axis"
r_fix, r_auto = [2, 5], None
m = 50
tc = 10
tlabels = ["Foo", "Blub", "Spam"]
tlocs, tlocs_large = [1, 5.5, 8], [5.2, 340, 9999.999]
ts = 5
tlo = 10
# Test constructor arguments
axis = charts.Axis(label=l, range=r_fix, grid=True)
assert axis.label == l
assert np.allclose(axis.range, r_fix) and axis.behavior == "fixed"
assert axis.grid
# Test properties, using the y axis of a 2D chart
chart_2d.line([0, 1], [1, 10])
chart_2d.show()
axis = chart_2d.y_axis
axis.label = l
assert axis.label == l
assert axis.GetTitle() == l
axis.label_visible = False
assert not axis.label_visible
assert not axis.GetTitleVisible()
axis.range = r_auto
assert axis.behavior == "auto"
axis.range = r_fix
r = [0.0, 0.0]
axis.GetRange(r)
assert np.allclose(axis.range, r_fix)
assert np.allclose(r, r_fix)
assert axis.behavior == "fixed"
assert axis.GetBehavior() == charts.Axis.BEHAVIORS["fixed"]
axis.behavior = "auto"
assert axis.behavior == "auto"
assert axis.GetBehavior() == charts.Axis.BEHAVIORS["auto"]
with pytest.raises(ValueError):
axis.behavior = "invalid"
axis.margin = m
assert axis.margin == m
assert axis.GetMargins()[0] == m
axis.log_scale = True # Log scale can be enabled for the currently drawn plot
chart_2d.show() # We have to call show to update all chart properties (calls Update and Paint methods of chart/plot objects).
assert axis.log_scale
assert axis.GetLogScaleActive()
axis.log_scale = False
chart_2d.show()
assert not axis.log_scale
assert not axis.GetLogScaleActive()
# TODO: following lines cause "vtkMath::Jacobi: Error extracting eigenfunctions" warning to be printed.
# This is a VTK issue that will be fixed once PR (!8618) is merged.
chart_2d.line([0, 1], [-10, 10]) # Plot for which log scale cannot be enabled
axis.log_scale = True
chart_2d.show()
assert not axis.log_scale
assert not axis.GetLogScaleActive()
axis.grid = False
assert not axis.grid
assert not axis.GetGridVisible()
axis.visible = False
assert not axis.visible
assert not axis.GetAxisVisible()
axis.toggle()
assert axis.visible
assert axis.GetAxisVisible()
tc0 = axis.tick_count
axis.tick_count = tc
assert axis.tick_count == tc
assert axis.GetNumberOfTicks() == tc
axis.tick_count = None
assert axis.tick_count == tc0
assert axis.GetNumberOfTicks() == tc0
axis.tick_count = -1
assert axis.tick_count == tc0
assert axis.GetNumberOfTicks() == tc0
tlocs0 = axis.tick_locations
tlabels0 = axis.tick_labels
axis.tick_locations = tlocs
axis.tick_labels = tlabels
assert np.allclose(axis.tick_locations, tlocs)
assert np.allclose(axis.GetTickPositions(), tlocs)
assert tuple(axis.tick_labels) == tuple(tlabels)
assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(tlabels)
axis.tick_labels = "2f"
chart_2d.show()
assert tuple(axis.tick_labels) == tuple(f"{loc:.2f}" for loc in tlocs)
assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(f"{loc:.2f}" for loc in tlocs)
assert axis.GetNotation() == charts.Axis.FIXED_NOTATION
assert axis.GetPrecision() == 2
axis.tick_labels = "4e"
axis.tick_locations = tlocs_large # Add some more variety to labels
chart_2d.show()
assert tuple(axis.tick_labels) == tuple(to_vtk_scientific(f"{loc:.4e}") for loc in tlocs_large)
assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(to_vtk_scientific(f"{loc:.4e}") for loc in tlocs_large)
assert axis.GetNotation() == charts.Axis.SCIENTIFIC_NOTATION
assert axis.GetPrecision() == 4
axis.tick_locations = None
axis.tick_labels = None
chart_2d.show()
assert np.allclose(axis.tick_locations, tlocs0)
assert np.allclose(axis.GetTickPositions(), tlocs0)
assert tuple(axis.tick_labels) == tuple(tlabels0)
assert vtk_array_to_tuple(axis.GetTickLabels()) == tuple(tlabels0)
axis.tick_size = ts
assert axis.tick_size == ts
assert axis.GetTickLength() == ts
axis.tick_labels_offset = tlo
assert axis.tick_labels_offset == tlo
assert axis.GetLabelOffset() == tlo
axis.tick_labels_visible = False
assert not axis.tick_labels_visible
assert not axis.GetLabelsVisible()
assert not axis.GetRangeLabelsVisible()
axis.ticks_visible = False
assert not axis.ticks_visible
assert not axis.GetTicksVisible()
def test_axis_label_font_size(chart_2d):
_ = chart_2d.line([0, 1, 2], [2, 1, 3])
axis = chart_2d.x_axis
font_size = 20
axis.label_size = font_size
assert axis.label_size == font_size
assert axis.GetTitleProperties().GetFontSize() == font_size
axis.tick_label_size = font_size
assert axis.tick_label_size == font_size
assert axis.GetLabelProperties().GetFontSize() == font_size
@skip_no_plotting
@pytest.mark.parametrize("chart_f", ("chart_2d", "chart_box", "chart_pie", "chart_mpl"))
def test_chart_common(pl, chart_f, request):
# Test the common chart functionalities
chart = request.getfixturevalue(chart_f)
title = "Chart title"
c_red, c_blue = (1, 0, 0, 1), (0, 0, 1, 1)
bw = 10
bs = "--"
# Check scene and renderer properties
assert chart._scene is None
assert chart._renderer is None
pl.add_chart(chart)
assert chart._scene is pl.renderer._charts._scene
assert chart._renderer is pl.renderer and chart._renderer is pl.renderer._charts._renderer
with pytest.raises((AssertionError, ValueError)):
chart.size = (-1, 1)
with pytest.raises((AssertionError, ValueError)):
chart.loc = (-1, 1)
try: # Try block for now as not all charts support a custom size and loc
chart.size = (0.5, 0.5)
chart.loc = (0.25, 0.25)
assert chart.size == (0.5, 0.5)
assert chart.loc == (0.25, 0.25)
except ValueError:
pass
# Check geometry and resizing
w, h = pl.window_size
chart._render_event()
assert chart._geometry == (chart.loc[0]*w, chart.loc[1]*h, chart.size[0]*w, chart.size[1]*h)
w, h = pl.window_size = [200, 200]
chart._render_event()
assert chart._geometry == (chart.loc[0]*w, chart.loc[1]*h, chart.size[0]*w, chart.size[1]*h)
# Check is_within
assert chart._is_within(((chart.loc[0]+chart.size[0]/2)*w, (chart.loc[1]+chart.size[1]/2)*h))
assert not chart._is_within(((chart.loc[0]+chart.size[0]/2)*w, chart.loc[1]*h-5))
assert not chart._is_within((chart.loc[0]*w-5, (chart.loc[1]+chart.size[1]/2)*h))
assert not chart._is_within((chart.loc[0]*w-5, chart.loc[1]*h-5))
chart.border_color = c_red
assert np.allclose(chart.border_color, c_red)
chart.border_width = bw
assert chart.border_width == bw
chart.border_style = bs
assert chart.border_style == bs
chart.background_color = c_blue
assert np.allclose(chart.background_color, c_blue)
# Check remaining properties and methods
chart.visible = False
assert not chart.visible
assert not chart.GetVisible()
chart.toggle()
assert chart.visible
assert chart.GetVisible()
chart.title = title
assert chart.title == title
chart.legend_visible = False
assert not chart.legend_visible
@pytest.mark.parametrize("plot_f", ("line_plot_2d", "scatter_plot_2d", "area_plot", "bar_plot", "stack_plot", "box_plot", "pie_plot"))
def test_plot_common(plot_f, request):
# Test the common plot functionalities
plot = request.getfixturevalue(plot_f)
c = (1, 0, 1, 1)
w = 5
s = "-."
l = "Label"
plot.color = c
assert np.allclose(plot.color, c)
assert np.allclose(plot.brush.color, c)
if hasattr(plot, "GetPen"):
assert plot.pen.__this__ == plot.GetPen().__this__
if hasattr(plot, "GetBrush"):
assert plot.brush.__this__ == plot.GetBrush().__this__
plot.line_width = w
assert plot.pen.width == w
plot.line_style = s
assert plot.pen.style == s
plot.label = l
assert plot.label == l
assert plot.GetLabel() == l
plot.visible = False
assert not plot.visible
assert not plot.GetVisible()
plot.toggle()
assert plot.visible
assert plot.GetVisible()
@pytest.mark.parametrize("plot_f", ("bar_plot", "stack_plot", "box_plot", "pie_plot"))
def test_multicomp_plot_common(plot_f, request):
# Test the common multicomp plot functionalities
plot = request.getfixturevalue(plot_f)
cs = "spectrum"
cs_colors = [(0.0, 0.0, 0.0, 1.0),
(0.8941176470588236, 0.10196078431372549, 0.10980392156862745, 1.0),
(0.21568627450980393, 0.49411764705882355, 0.7215686274509804, 1.0),
(0.30196078431372547, 0.6862745098039216, 0.2901960784313726, 1.0),
(0.596078431372549, 0.3058823529411765, 0.6392156862745098, 1.0),
(1.0, 0.4980392156862745, 0.0, 1.0),
(0.6509803921568628, 0.33725490196078434, 0.1568627450980392, 1.0)]
colors = [(1, 0, 1, 1), (0, 1, 1, 1), (1, 1, 0, 1)]
labels = ["Foo", "Spam", "Bla"]
plot.color_scheme = cs
assert plot.color_scheme == cs
assert plot._color_series.GetColorScheme() == plot.COLOR_SCHEMES[cs]["id"]
assert np.allclose(plot.colors, cs_colors)
series_colors = [plot._from_c3ub(plot._color_series.GetColor(i)) for i in range(len(cs_colors))]
assert np.allclose(series_colors, cs_colors)
lookup_colors = [plot._lookup_table.GetTableValue(i) for i in range(len(cs_colors))]
assert np.allclose(lookup_colors, cs_colors)
assert np.allclose(plot.brush.color, cs_colors[0])
plot.colors = None
assert plot.color_scheme == plot.DEFAULT_COLOR_SCHEME
plot.colors = cs
assert plot.color_scheme == cs
plot.colors = colors
assert np.allclose(plot.colors, colors)
series_colors = [plot._from_c3ub(plot._color_series.GetColor(i)) for i in range(len(colors))]
assert np.allclose(series_colors, colors)
lookup_colors = [plot._lookup_table.GetTableValue(i) for i in range(len(colors))]
assert np.allclose(lookup_colors, colors)
assert np.allclose(plot.brush.color, colors[0])
plot.color = colors[1]
assert np.allclose(plot.color, colors[1])
assert np.allclose(plot.colors, [colors[1]])
assert np.allclose(plot.brush.color, colors[1])
plot.labels = labels
assert tuple(plot.labels) == tuple(labels)
assert plot.label == labels[0]
plot.labels = None
assert plot.labels == []
assert plot.label == ""
plot.label = labels[1]
assert tuple(plot.labels) == (labels[1],)
assert plot.label == labels[1]
plot.label = None
assert plot.labels == []
assert plot.label == ""
def test_lineplot2d(line_plot_2d):
x = [-2, -1, 0, 1, 2]
y = [4, 1, 0, -1, -4]
c = (1, 0, 1, 1)
w = 5
s = "-."
l = "Line"
# Test constructor
plot = charts.LinePlot2D(x, y, c, w, s, l)
assert np.allclose(plot.x, x)
assert np.allclose(plot.y, y)
assert np.allclose(plot.color, c)
assert plot.line_width == w
assert plot.line_style == s
assert plot.label == l
# Test remaining properties
line_plot_2d.update(x, y)
assert np.allclose(line_plot_2d.x, x)
assert np.allclose(line_plot_2d.y, y)
def test_scatterplot2d(scatter_plot_2d):
x = [-2, -1, 0, 1, 2]
y = [4, 1, 0, -1, -4]
c = (1, 0, 1, 1)
sz = 5
st, st_inv = "o", "^"
l = "Scatter"
assert st_inv not in charts.ScatterPlot2D.MARKER_STYLES, "New marker styles added? Change this test."
# Test constructor
plot = charts.ScatterPlot2D(x, y, c, sz, st, l)
assert np.allclose(plot.x, x)
assert np.allclose(plot.y, y)
assert np.allclose(plot.color, c)
assert plot.marker_size == sz
assert plot.marker_style == st
assert plot.label == l
# Test remaining properties
scatter_plot_2d.update(x, y)
assert np.allclose(scatter_plot_2d.x, x)
assert np.allclose(scatter_plot_2d.y, y)
scatter_plot_2d.marker_size = sz
assert scatter_plot_2d.marker_size == sz
assert scatter_plot_2d.GetMarkerSize() == sz
scatter_plot_2d.marker_style = None
assert scatter_plot_2d.marker_style == ""
scatter_plot_2d.marker_style = st
assert scatter_plot_2d.marker_style == st
assert scatter_plot_2d.GetMarkerStyle() == scatter_plot_2d.MARKER_STYLES[st]["id"]
with pytest.raises(ValueError):
scatter_plot_2d.marker_style = st_inv
def test_areaplot(area_plot):
x = [-2, -1, 0, 1, 2]
y1 = [4, 1, 0, -1, -4]
y2 = [-4, -2, 0, 2, 4]
c = (1, 0, 1, 1)
l = "Line"
# Test constructor
plot = charts.AreaPlot(x, y1, y2, c, l)
assert np.allclose(plot.x, x)
assert np.allclose(plot.y1, y1)
assert np.allclose(plot.y2, y2)
assert np.allclose(plot.color, c)
assert plot.label == l
# Test remaining properties
area_plot.update(x, y1, y2)
assert np.allclose(area_plot.x, x)
assert np.allclose(area_plot.y1, y1)
assert np.allclose(area_plot.y2, y2)
def test_barplot(bar_plot):
x = [0, 1, 2]
y = [[1, 2, 3], [2, 1, 0], [1, 1, 1]]
c = [(1, 0, 1, 1), (1, 1, 0, 1), (0, 1, 1, 1)]
ori, ori_inv = "H", "I"
l = ["Foo", "Spam", "Bla"]
assert ori_inv not in charts.BarPlot.ORIENTATIONS, "New orientations added? Change this test."
# Test multi comp constructor
plot = charts.BarPlot(x, y, c, ori, l)
assert np.allclose(plot.x, x)
assert np.allclose(plot.y, y)
assert np.allclose(plot.colors, c)
assert plot.orientation == ori
assert plot.labels == l
# Test single comp constructor
plot = charts.BarPlot(x, y[0], c[0], ori, l[0])
assert np.allclose(plot.x, x)
assert np.allclose(plot.y, y[0])
assert np.allclose(plot.color, c[0])
assert plot.orientation == ori
assert plot.label == l[0]
# Test multi and single comp constructors with inconsistent arguments
with pytest.raises(ValueError):
charts.BarPlot(x, y, c[0], ori, l)
    # charts.BarPlot(x, y, c, ori, l[0])  # This one is valid
with pytest.raises(ValueError):
charts.BarPlot(x, y[0], c, ori, l[0])
with pytest.raises(ValueError):
charts.BarPlot(x, y[0], c[0], ori, l)
# Test remaining properties
bar_plot.update(x, y)
assert np.allclose(bar_plot.x, x)
assert np.allclose(bar_plot.y, y)
bar_plot.orientation = ori
assert bar_plot.orientation == ori
assert bar_plot.GetOrientation() == bar_plot.ORIENTATIONS[ori]
with pytest.raises(ValueError):
bar_plot.orientation = ori_inv
def test_stackplot(stack_plot):
x = [0, 1, 2]
ys = [[1, 2, 3], [2, 1, 0], [1, 1, 1]]
c = [(1, 0, 1, 1), (1, 1, 0, 1), (0, 1, 1, 1)]
l = ["Foo", "Spam", "Bla"]
# Test multi comp constructor
plot = charts.StackPlot(x, ys, c, l)
assert np.allclose(plot.x, x)
assert np.allclose(plot.ys, ys)
assert np.allclose(plot.colors, c)
assert plot.labels == l
# Test single comp constructor
plot = charts.StackPlot(x, ys[0], c[0], l[0])
assert np.allclose(plot.x, x)
assert np.allclose(plot.ys, ys[0])
assert np.allclose(plot.color, c[0])
assert plot.label == l[0]
# Test multi and single comp constructors with inconsistent arguments
with pytest.raises(ValueError):
charts.StackPlot(x, ys, c[0], l)
# charts.StackPlot(x, ys, c, l[0]) # This one is valid
with pytest.raises(ValueError):
charts.StackPlot(x, ys[0], c, l[0])
with pytest.raises(ValueError):
charts.StackPlot(x, ys[0], c[0], l)
# Test remaining properties
stack_plot.update(x, ys)
assert np.allclose(stack_plot.x, x)
assert np.allclose(stack_plot.ys, ys)
@skip_no_plotting
def test_chart_2d(pl, chart_2d):
size = (0.5, 0.5)
loc = (0.25, 0.25)
lx = "X label"
ly = "Y label"
rx = [0, 5]
ry = [0, 1]
x = np.arange(11)-5
y = x**2
ys = [np.sin(x), np.cos(x), np.tanh(x)]
col = (1, 0, 1, 1)
cs = "citrus"
sz = 5
ms = "d"
w = 10
ls = "-."
ori = "V"
# Test constructor
chart = pyvista.Chart2D(size, loc, lx, ly, False)
assert chart.size == size
assert chart.loc == loc
assert chart.x_label == lx
assert chart.y_label == ly
assert not chart.grid
# Test geometry and resizing
pl.add_chart(chart)
r_w, r_h = chart._renderer.GetSize()
pl.show(auto_close=False)
assert np.allclose(chart._geometry, (loc[0]*r_w, loc[1]*r_h, size[0]*r_w, size[1]*r_h))
pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
pl.show() # This will also call chart._resize
assert np.allclose(chart._geometry, (loc[0]*r_w/2, loc[1]*r_h/2, size[0]*r_w/2, size[1]*r_h/2))
# Test parse_format
colors = itertools.chain(pyvista.hexcolors, pyvista.color_char_to_word, ["#fa09b6", ""])
for m in charts.ScatterPlot2D.MARKER_STYLES:
for l in charts.Pen.LINE_STYLES:
for c in colors:
cp = "b" if c == "" else c
assert (m, l, cp) == chart_2d._parse_format(m + l + c)
assert (m, l, cp) == chart_2d._parse_format(m + c + l)
assert (m, l, cp) == chart_2d._parse_format(l + m + c)
assert (m, l, cp) == chart_2d._parse_format(l + c + m)
assert (m, l, cp) == chart_2d._parse_format(c + m + l)
assert (m, l, cp) == chart_2d._parse_format(c + l + m)
# Test plotting methods
s, l = chart_2d.plot(x, y, "")
assert s is None and l is None
assert len([*chart_2d.plots()]) == 0
s, l = chart_2d.plot(y, "-")
assert s is None and l is not None
assert l in chart_2d.plots("line")
chart_2d.remove_plot(l)
assert len([*chart_2d.plots()]) == 0
s, l = chart_2d.plot(y, "x")
assert s is not None and l is None
assert s in chart_2d.plots("scatter")
chart_2d.clear("scatter")
assert len([*chart_2d.plots()]) == 0
s, l = chart_2d.plot(x, y, "x-")
assert s is not None and l is not None
assert s in chart_2d.plots("scatter") and l in chart_2d.plots("line")
chart_2d.plot(x, y, "x-") # Check clearing of multiple plots (of the same type)
chart_2d.clear()
assert len([*chart_2d.plots()]) == 0
s = chart_2d.scatter(x, y, col, sz, ms, lx)
assert np.allclose(s.x, x)
assert np.allclose(s.y, y)
assert np.allclose(s.color, col)
assert s.marker_size == sz
assert s.marker_style == ms
assert s.label == lx
assert s in chart_2d.plots("scatter")
assert chart_2d.GetPlotIndex(s) >= 0
l = chart_2d.line(x, y, col, w, ls, lx)
assert np.allclose(l.x, x)
assert np.allclose(l.y, y)
assert np.allclose(l.color, col)
assert l.line_width == w
assert l.line_style == ls
assert l.label == lx
assert l in chart_2d.plots("line")
assert chart_2d.GetPlotIndex(l) >= 0
a = chart_2d.area(x, -y, y, col, lx)
assert np.allclose(a.x, x)
assert np.allclose(a.y1, -y)
assert np.allclose(a.y2, y)
assert np.allclose(a.color, col)
assert a.label == lx
assert a in chart_2d.plots("area")
assert chart_2d.GetPlotIndex(a) >= 0
b = chart_2d.bar(x, -y, col, ori, lx)
assert np.allclose(b.x, x)
assert np.allclose(b.y, -y)
assert np.allclose(b.color, col)
assert b.orientation == ori
assert b.label == lx
assert b in chart_2d.plots("bar")
assert chart_2d.GetPlotIndex(b) >= 0
s = chart_2d.stack(x, ys, cs, [lx, ly])
assert np.allclose(s.x, x)
assert np.allclose(s.ys, ys)
assert s.color_scheme == cs
assert tuple(s.labels) == (lx, ly)
assert s in chart_2d.plots("stack")
assert chart_2d.GetPlotIndex(s) >= 0
inv_type = "blub"
with pytest.raises(KeyError):
next(chart_2d.plots(inv_type))
with pytest.raises(KeyError):
chart_2d.clear(inv_type)
assert len([*chart_2d.plots()]) == 5
chart_2d.clear()
assert len([*chart_2d.plots()]) == 0
with pytest.raises(ValueError):
chart_2d.remove_plot(s)
# Check remaining properties
assert chart_2d.x_axis.__this__ == chart_2d.GetAxis(charts.Axis.BOTTOM).__this__
assert chart_2d.y_axis.__this__ == chart_2d.GetAxis(charts.Axis.LEFT).__this__
chart_2d.x_label = lx
assert chart_2d.x_label == lx
assert chart_2d.x_axis.label == lx
chart_2d.y_label = ly
assert chart_2d.y_label == ly
assert chart_2d.y_axis.label == ly
chart_2d.x_range = rx
assert np.allclose(chart_2d.x_range, rx)
assert np.allclose(chart_2d.x_axis.range, rx)
chart_2d.y_range = ry
assert np.allclose(chart_2d.y_range, ry)
assert np.allclose(chart_2d.y_axis.range, ry)
chart_2d.grid = True
assert chart_2d.grid
assert chart_2d.x_axis.grid and chart_2d.y_axis.grid
chart_2d.hide_axes()
for axis in (chart_2d.x_axis, chart_2d.y_axis):
assert not (axis.visible or axis.label_visible or axis.ticks_visible or axis.tick_labels_visible or axis.grid)
@skip_no_plotting
def test_chart_box(pl, chart_box, box_plot):
data = [[0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 5, 5, 6]]
stats = [np.quantile(d, [0.0, 0.25, 0.5, 0.75, 1.0]) for d in data]
cs = "wild_flower"
ls = ["Datalabel"]
# Test constructor
chart = pyvista.ChartBox(data, cs, ls)
assert np.allclose(chart.plot.data, data)
assert chart.plot.color_scheme == cs
assert tuple(chart.plot.labels) == tuple(ls)
# Test geometry and resizing
pl.add_chart(chart)
r_w, r_h = chart._renderer.GetSize()
pl.show(auto_close=False)
assert np.allclose(chart._geometry, (0, 0, r_w, r_h))
pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
pl.show() # This will also call chart._resize
assert np.allclose(chart._geometry, (0, 0, r_w/2, r_h/2))
# Test remaining properties
assert chart_box.loc == (0, 0)
assert chart_box.size == (1, 1)
assert chart_box.plot.__this__ == chart_box.GetPlot(0).__this__
box_plot.update(data)
assert np.allclose(box_plot.data, data)
assert np.allclose(box_plot.stats, stats)
@skip_no_plotting
def test_chart_pie(pl, chart_pie, pie_plot):
data = [3, 4, 5]
cs = "wild_flower"
ls = ["Tic", "Tac", "Toe"]
# Test constructor
chart = pyvista.ChartPie(data, cs, ls)
assert np.allclose(chart.plot.data, data)
assert chart.plot.color_scheme == cs
assert tuple(chart.plot.labels) == tuple(ls)
# Test geometry and resizing
pl.add_chart(chart)
r_w, r_h = chart._renderer.GetSize()
pl.show(auto_close=False)
assert np.allclose(chart._geometry, (0, 0, r_w, r_h))
pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
pl.show() # This will also call chart._resize
assert np.allclose(chart._geometry, (0, 0, r_w/2, r_h/2))
# Test remaining properties
assert chart_pie.loc == (0, 0)
assert chart_pie.size == (1, 1)
assert chart_pie.plot.__this__ == chart_pie.GetPlot(0).__this__
pie_plot.update(data)
assert np.allclose(pie_plot.data, data)
@skip_no_plotting
def test_chart_mpl(pl, chart_mpl):
size = (0.5, 0.5)
loc = (0.25, 0.25)
# Test constructor
f, ax = plt.subplots()
chart = pyvista.ChartMPL(f, size, loc)
assert chart.size == size
assert chart.loc == loc
# Test geometry and resizing
pl.add_chart(chart)
r_w, r_h = chart._renderer.GetSize()
pl.show(auto_close=False)
assert np.allclose(chart._geometry, (loc[0]*r_w, loc[1]*r_h, size[0]*r_w, size[1]*r_h))
assert np.allclose(chart.position, (loc[0]*r_w, loc[1]*r_h))
assert np.allclose(chart._canvas.get_width_height(), (size[0]*r_w, size[1]*r_h))
pl.window_size = (int(pl.window_size[0]/2), int(pl.window_size[1]/2))
pl.show() # This will also call chart._resize
assert np.allclose(chart._geometry, (loc[0]*r_w/2, loc[1]*r_h/2, size[0]*r_w/2, size[1]*r_h/2))
assert np.allclose(chart.position, (loc[0]*r_w/2, loc[1]*r_h/2))
assert np.allclose(chart._canvas.get_width_height(), (size[0]*r_w/2, size[1]*r_h/2))
@skip_no_plotting
def test_charts(pl):
win_size = pl.window_size
top_left = pyvista.Chart2D(size=(0.5, 0.5), loc=(0, 0.5))
bottom_right = pyvista.Chart2D(size=(0.5, 0.5), loc=(0.5, 0))
# Test add_chart
pl.add_chart(top_left)
assert pl.renderers[0].__this__ == top_left._renderer.__this__
assert pl.renderers[0]._charts._scene.__this__ == top_left._scene.__this__
pl.add_chart(bottom_right)
assert len(pl.renderers[0]._charts) == 2
# Test toggle_interaction
pl.show(auto_close=False) # We need to plot once to let the charts compute their true geometry
assert not top_left.GetInteractive()
assert not bottom_right.GetInteractive()
assert pl.renderers[0]._charts.toggle_interaction((0.75*win_size[0], 0.25*win_size[1])) is bottom_right._scene
assert not top_left.GetInteractive()
assert bottom_right.GetInteractive()
assert pl.renderers[0]._charts.toggle_interaction((0, 0)) is None
assert not top_left.GetInteractive()
assert not bottom_right.GetInteractive()
# Test remove_chart
pl.remove_chart(1)
assert len(pl.renderers[0]._charts) == 1
assert pl.renderers[0]._charts[0] == top_left
assert top_left in pl.renderers[0]._charts
pl.remove_chart(top_left)
assert len(pl.renderers[0]._charts) == 0
# Test deep_clean
pl.add_chart(top_left, bottom_right)
pl.deep_clean()
assert len(pl.renderers[0]._charts) == 0
assert pl.renderers[0]._charts._scene is None
@skip_no_plotting
def test_iren_context_style(pl):
chart = pyvista.Chart2D(size=(0.5, 0.5), loc=(0.5, 0.5))
win_size = pl.window_size
pl.add_chart(chart)
pl.show(auto_close=False) # We need to plot once to let the charts compute their true geometry
style = pl.iren._style
style_class = pl.iren._style_class
# Simulate right click on the chart:
pl.iren._mouse_right_button_press(int(0.75*win_size[0]), int(0.75*win_size[1]))
assert chart.GetInteractive()
assert pl.iren._style == "Context"
assert pl.iren._style_class == pl.iren._context_style
assert pl.iren._context_style.GetScene().__this__ == chart._scene.__this__
# Simulate right click outside the chart:
pl.iren._mouse_right_button_press(0, 0)
assert not chart.GetInteractive()
assert pl.iren._style == style
assert pl.iren._style_class == style_class
assert pl.iren._context_style.GetScene() is None
def test_get_background_texture(chart_2d):
t_puppy = examples.download_puppy_texture()
chart_2d.background_texture = t_puppy
assert chart_2d.background_texture == t_puppy
| 32.764458
| 134
| 0.657274
|
88415ef561b33584e4d5e0d598e84242a9154359
| 1,785
|
py
|
Python
|
littlelambocoin/types/blockchain_format/tree_hash.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | 6
|
2021-07-15T16:52:46.000Z
|
2021-09-27T16:57:08.000Z
|
littlelambocoin/types/blockchain_format/tree_hash.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | 6
|
2021-07-27T08:17:34.000Z
|
2021-11-30T11:39:19.000Z
|
littlelambocoin/types/blockchain_format/tree_hash.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | 7
|
2021-08-15T15:10:58.000Z
|
2021-10-04T16:47:39.000Z
|
"""
This is an implementation of `sha256_treehash`, used to calculate
puzzle hashes in clvm.
This implementation goes to great pains to be non-recursive so we don't
have to worry about blowing out the python stack.
"""
from typing import Optional, Set
from clvm import CLVMObject
from littlelambocoin.types.blockchain_format.sized_bytes import bytes32
from littlelambocoin.util.hash import std_hash
def sha256_treehash(sexp: CLVMObject, precalculated: Optional[Set[bytes32]] = None) -> bytes32:
"""
Hash values in `precalculated` are presumed to have been hashed already.
"""
if precalculated is None:
precalculated = set()
def handle_sexp(sexp_stack, op_stack, precalculated: Set[bytes32]) -> None:
sexp = sexp_stack.pop()
if sexp.pair:
p0, p1 = sexp.pair
sexp_stack.append(p0)
sexp_stack.append(p1)
op_stack.append(handle_pair)
op_stack.append(handle_sexp)
op_stack.append(roll)
op_stack.append(handle_sexp)
else:
if sexp.atom in precalculated:
r = sexp.atom
else:
r = std_hash(b"\1" + sexp.atom)
sexp_stack.append(r)
def handle_pair(sexp_stack, op_stack, precalculated) -> None:
p0 = sexp_stack.pop()
p1 = sexp_stack.pop()
sexp_stack.append(std_hash(b"\2" + p0 + p1))
def roll(sexp_stack, op_stack, precalculated) -> None:
p0 = sexp_stack.pop()
p1 = sexp_stack.pop()
sexp_stack.append(p0)
sexp_stack.append(p1)
sexp_stack = [sexp]
op_stack = [handle_sexp]
while len(op_stack) > 0:
op = op_stack.pop()
op(sexp_stack, op_stack, precalculated)
return bytes32(sexp_stack[0])
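# Illustrative usage sketch, not part of the original module. It assumes that
# CLVMObject accepts bytes for an atom and a 2-tuple for a pair, which is how
# the clvm package constructs nodes.
if __name__ == "__main__":
    leaf_a = CLVMObject(b"a")
    leaf_b = CLVMObject(b"b")
    pair = CLVMObject((leaf_a, leaf_b))
    # By the definitions above this equals
    # std_hash(b"\2" + std_hash(b"\1" + b"a") + std_hash(b"\1" + b"b"))
    print(sha256_treehash(pair).hex())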
| 30.254237
| 95
| 0.638095
|
29f731ea0645b09056e778318e04bce9a210a79e
| 71,085
|
py
|
Python
|
plugins/modules/aws_elastigroup.py
|
spotinst/spot-ansible-cloud-modules
|
c0814c9dbf69cb7080b02e16b6c7c82a3c376db2
|
[
"MIT"
] | 1
|
2022-02-16T02:09:00.000Z
|
2022-02-16T02:09:00.000Z
|
plugins/modules/aws_elastigroup.py
|
spotinst/spot-ansible-cloud-modules
|
c0814c9dbf69cb7080b02e16b6c7c82a3c376db2
|
[
"MIT"
] | 1
|
2022-01-23T14:13:42.000Z
|
2022-01-23T14:13:50.000Z
|
plugins/modules/aws_elastigroup.py
|
spotinst/spot-ansible-cloud-modules
|
c0814c9dbf69cb7080b02e16b6c7c82a3c376db2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
module: aws_elastigroup
version_added: 1.0.0
short_description: Manage (Create, Update, Delete) Spot AWS Elastigroups
author: Spot by NetApp (@talzur)
description:
    - Can create, update, or delete Spotinst AWS Elastigroups.
      Launch configuration is part of the elastigroup configuration,
      so no additional modules are necessary for handling the launch configuration.
      You will have to have a credentials file in this location - <home>/.spotinst/credentials
      The credentials file must contain a row that looks like this -
      token = <YOUR TOKEN>
      Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-)
extends_documentation_fragment:
- spot.cloud_modules.requirements
options:
credentials_path:
type: str
default: "/root/.spotinst/credentials"
description:
      - Optional parameter that allows setting a non-default credentials path.
account_id:
type: str
description:
      - Optional parameter that allows setting an account id inside the module configuration. By default this is retrieved from the credentials path.
token:
version_added: 2.8
type: str
description:
      - Optional parameter that allows setting a token inside the module configuration. By default this is retrieved from the credentials path.
state:
type: str
choices:
- present
- absent
default: present
description:
      - Create, update, or delete.
auto_apply_tags:
type: bool
description:
      - Whether or not to apply tags without rolling the group
version_added: 2.8
availability_vs_cost:
type: str
choices:
- availabilityOriented
- costOriented
- balanced
description:
- The strategy orientation.
required: true
availability_zones:
type: list
description:
- a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are
name (String),
subnet_id (String),
subnet_ids(List of Strings),
placement_group_name (String),
required: true
block_device_mappings:
type: list
description:
- a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
You can specify virtual devices and EBS volumes.;
'[{"key":"value", "key":"value"}]';
keys allowed are
device_name (List of Strings),
virtual_name (String),
no_device (String),
ebs (Object, expects the following keys-
delete_on_termination(Boolean),
encrypted(Boolean),
iops (Integer),
snapshot_id(Integer),
volume_type(String),
volume_size(Integer))
code_deploy:
version_added: 2.8
type: dict
description:
- code deploy integration configuration
suboptions:
deployment_groups:
description: deployment groups configurations
type: list
suboptions:
application_name:
description: application name
type: str
deployment_group_name:
description: deployment group name
type: str
clean_up_on_failure:
description: clean up on failure
type: bool
terminate_instance_on_failure:
description: terminate instance on failure
type: bool
chef:
type: dict
description:
- The Chef integration configuration.;
Expects the following keys-
chef_server (String),
organization (String),
user (String),
pem_key (String),
chef_version (String)
docker_swarm:
type: dict
version_added: 2.8
description:
- The Docker Swarm integration configuration.;
Expects the following keys-
master_host (String),
master_port (Integer),
auto_scale (Object expects the following keys-
is_enabled (Boolean),
cooldown (Integer),
headroom (Object expects the following keys-
cpu_per_unit (Integer),
memory_per_unit (Integer),
num_of_units (Integer)),
key (String),
value (String)),
down (Object expecting the following key -
down_evaluation_periods (Integer)))
draining_timeout:
type: int
description:
- Time for instance to be drained from incoming requests and deregistered from ELB before termination.
ebs_optimized:
type: bool
description:
- Enable EBS optimization for supported instances which are not enabled by default.;
Note - additional charges will be applied.
ebs_volume_pool:
type: list
description:
- a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
'[{"key":"value", "key":"value"}]';
keys allowed are -
volume_ids (List of Strings),
device_name (String)
ecs:
type: dict
description:
- The ECS integration configuration.;
Expects the following keys -
cluster_name (String),
auto_scale (Object expects the following keys -
is_enabled (Boolean),
is_auto_config (Boolean),
cooldown (Integer),
headroom (Object expects the following keys -
cpu_per_unit (Integer),
memory_per_unit (Integer),
num_of_units (Integer)),
attributes (List of Objects expecting the following keys -
key (String),
value (String)),
down (Object expecting the following key -
down_evaluation_periods (Integer)))
elastic_ips:
type: list
description:
- List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
elastic_beanstalk:
version_added: 2.8
type: dict
description:
- The ElasticBeanstalk integration configuration.;
Expects the following keys -
environment_id (String)
deployment_preferences (Object expects the following keys -
automatic_roll (Boolean),
batch_size_percentage (Integer),
grace_period (Integer),
strategy (Object expects the following keys-
action (String),
should_drain_instances (Boolean)))
fallback_to_od:
type: bool
description:
- In case of no spots available, Elastigroup will launch an On-demand instance instead
health_check_grace_period:
type: int
description:
      - The amount of time, in seconds, after the instance has launched before its health is checked.
default: 300
health_check_unhealthy_duration_before_replacement:
type: int
description:
      - Minimal amount of time an instance should be unhealthy before we consider it unhealthy.
health_check_type:
type: str
choices:
- ELB
- HCS
- TARGET_GROUP
- MLB
- EC2
description:
- The service to use for the health check.
iam_role_name:
type: str
description:
- The instance profile iamRole name
- Only use iam_role_arn, or iam_role_name
iam_role_arn:
type: str
description:
- The instance profile iamRole arn
- Only use iam_role_arn, or iam_role_name
id:
type: str
description:
- The group id if it already exists and you want to update, or delete it.
This will not work unless the uniqueness_by field is set to id.
When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
ignore_changes:
type: list
choices:
- image_id
- target
description:
- list of fields on which changes should be ignored when updating
image_id:
type: str
description:
- The image Id used to launch the instance.;
In case of conflict between Instance type and image type, an error will be returned
required: true
key_pair:
type: str
description:
- Specify a Key Pair to attach to the instances
required: true
kubernetes:
type: dict
description:
- The Kubernetes integration configuration.;
Expects the following keys -
api_server (String),
token (String),
integration_mode (String),
cluster_identifier (String),
auto_scale (Object expects the following keys -
is_enabled (Boolean),
is_auto_config (Boolean),
cooldown (Integer),
headroom (Object expects the following keys -
cpu_per_unit (Integer),
memory_per_unit (Integer),
num_of_units (Integer)),
labels (List of Objects expecting the following keys -
key (String),
value (String)),
down (Object expecting the following key -
down_evaluation_periods (Integer)))
lifetime_period:
type: str
description:
- lifetime period
load_balancers:
type: list
description:
- List of classic ELB names
max_size:
type: int
description:
- The upper limit number of instances that you can scale up to
required: true
mesosphere:
type: dict
description:
- (Object) The Mesosphere integration configuration.
Expects the following key -
api_server (String)
min_size:
type: int
description:
- The lower limit number of instances that you can scale down to
required: true
mlb_load_balancers:
version_added: 2.8
type: list
description:
- Objects representing mlb's.;
Expects the following keys-
target_set_id (String)
balancer_id (String)
auto_weight (String)
az_awareness (String)
type (String) MULTAI_TARGET_SET
mlb_runtime:
version_added: 2.8
type: dict
description:
- The Spotinst MLB Runtime integration configuration.;
Expects the following keys-
deployment_id (String) The runtime's deployment id
monitoring:
type: bool
description:
- Describes whether instance Enhanced Monitoring is enabled
required: true
name:
type: str
description:
- Unique name for elastigroup to be created, updated or deleted
required: true
network_interfaces:
type: list
description:
- a list of hash/dictionaries of network interfaces to add to the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
description (String),
device_index (Integer),
secondary_private_ip_address_count (Integer),
associate_public_ip_address (Boolean),
delete_on_termination (Boolean),
groups (List of Strings),
network_interface_id (String),
private_ip_address (String),
subnet_id (String),
associate_ipv6_address (Boolean),
private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
nomad:
version_added: 2.8
type: dict
description:
- The Nomad integration configuration.;
Expects the following keys-
master_host (String),
master_port (Integer),
acl_token (String),
auto_scale (Object expects the following keys-
is_enabled (Boolean),
cooldown (Integer),
headroom (Object expects the following keys-
cpu_per_unit (Integer),
memory_per_unit (Integer),
num_of_units (Integer)),
constraints (List of Objects expecting the following keys-
key (String),
value (String)),
down (Object expecting the following key -
down_evaluation_periods (Integer)))
on_demand_count:
type: int
description:
- Required if risk is not set
- Number of on demand instances to launch. All other instances will be spot instances.;
Either set this parameter or the risk parameter
on_demand_instance_type:
type: str
description:
- On-demand instance type that will be provisioned
required: true
opsworks:
type: dict
description:
      - The elastigroup OpsWorks integration configuration.;
Expects the following key -
layer_id (String)
persistence:
type: dict
description:
      - The Stateful elastigroup configuration.;
Expects the following keys -
should_persist_root_device (Boolean),
should_persist_block_devices (Boolean),
should_persist_private_ip (Boolean),
block_devices_mode(String ('reattach', 'onLaunch'))
preferred_spot_instance_types:
version_added: 2.8
type: list
description:
- The preferred spot instance types.;
private_ips:
version_added: 2.8
type: list
description:
- List of Private IPs to associate to the group instances.
product:
type: str
choices:
- Linux/UNIX
- SUSE Linux
- Windows
- Linux/UNIX (Amazon VPC)
- SUSE Linux (Amazon VPC)
      - Windows (Amazon VPC)
description:
- Operation system type.
required: true
rancher:
type: dict
description:
- The Rancher integration configuration.;
Expects the following keys -
version (String),
access_key (String),
secret_key (String),
master_host (String)
revert_to_spot:
version_added: 2.8
type: dict
description:
- Contains parameters for revert to spot
right_scale:
type: dict
description:
- The Rightscale integration configuration.;
Expects the following keys -
account_id (String),
refresh_token (String)
risk:
type: int
description:
- required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
roll_config:
type: dict
description:
- Roll configuration.;
If you would like the group to roll after updating, please use this feature.
Accepts the following keys -
batch_size_percentage(Integer, Required),
grace_period - (Integer, Required),
health_check_type(String, Optional)
route53:
version_added: 2.8
type: dict
description:
- The Route53 integration configuration.;
Expects the following key -
domains (List of Objects expecting the following keys -
hosted_zone_id (String),
record_sets (List of Objects expecting the following keys -
name (String)
use_public_ip (Boolean)))
scheduled_tasks:
type: list
description:
- a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
adjustment (Integer),
scale_target_capacity (Integer),
scale_min_capacity (Integer),
scale_max_capacity (Integer),
adjustment_percentage (Integer),
batch_size_percentage (Integer),
cron_expression (String),
frequency (String),
grace_period (Integer),
task_type (String, required),
is_enabled (Boolean)
security_group_ids:
type: list
description:
- One or more security group IDs. ;
In case of update it will override the existing Security Group with the new given array
required: true
shutdown_script:
type: str
description:
- The Base64-encoded shutdown script that executes prior to instance termination.
Encode before setting.
signals:
type: list
description:
- a list of hash/dictionaries of signals to configure in the elastigroup;
keys allowed are -
name (String, required),
timeout (Integer)
spin_up_time:
type: int
description:
- spin up time, in seconds, for the instance
spot_instance_types:
type: list
description:
- Spot instance type that will be provisioned.
required: true
stateful_deallocation_should_delete_network_interfaces:
version_added: 2.8
type: bool
description:
- Enable deletion of network interfaces on stateful group deletion
stateful_deallocation_should_delete_snapshots:
version_added: 2.8
type: bool
description:
- Enable deletion of snapshots on stateful group deletion
stateful_deallocation_should_delete_images:
version_added: 2.8
type: bool
description:
- Enable deletion of images on stateful group deletion
stateful_deallocation_should_delete_volumes:
version_added: 2.8
type: bool
description:
- Enable deletion of volumes on stateful group deletion
tags:
type: list
description:
- a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
target:
type: int
description:
- The number of instances to launch
required: true
target_group_arns:
type: list
description:
- List of target group arns instances should be registered to
tenancy:
type: str
choices:
- default
- dedicated
description:
- dedicated vs shared tenancy
terminate_at_end_of_billing_hour:
type: bool
description:
- terminate at the end of billing hour
unit:
type: str
choices:
- instance
- weight
description:
- The capacity unit to launch instances by.
required: true
up_scaling_policies:
type: list
description:
- a list of hash/dictionaries of scaling policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
metric_name (String, required),
dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
statistic (String, required)
evaluation_periods (String, required),
period (String, required),
threshold (String, required),
cooldown (String, required),
unit (String, required),
operator (String, required),
action_type (String, required),
adjustment (String),
min_target_capacity (String),
target (String),
maximum (String),
minimum (String),
shouldResumeStateful (Bool, relevant only for scale up policy)
down_scaling_policies:
type: list
description:
- a list of hash/dictionaries of scaling policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
metric_name (String, required),
dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)),
statistic (String, required),
evaluation_periods (String, required),
period (String, required),
threshold (String, required),
cooldown (String, required),
unit (String, required),
operator (String, required),
action_type (String, required),
adjustment (String),
max_target_capacity (String),
target (String),
maximum (String),
minimum (String)
target_tracking_policies:
type: list
description:
- a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
source (String, required),
metric_name (String, required),
statistic (String, required),
unit (String, required),
cooldown (String, required),
target (String, required)
uniqueness_by:
type: str
choices:
- id
- name
description:
- If your group names are not unique, you may use this feature to update or delete a specific group.
Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created.
user_data:
type: str
description:
- Base64-encoded MIME user data. Encode before setting the value.
utilize_reserved_instances:
type: bool
description:
- In case of any available Reserved Instances,
Elastigroup will utilize your reservations before purchasing Spot instances.
wait_for_instances:
type: bool
description:
- Whether or not the elastigroup creation / update actions should wait for the instances to spin
wait_timeout:
type: int
description:
- How long the module should wait for instances before failing the action.;
Only works if wait_for_instances is True.
"""
EXAMPLES = '''
# Basic configuration YAML example
- hosts: localhost
tasks:
- name: create elastigroup
aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
- hosts: localhost
tasks:
- name: create elastigroup
aws_elastigroup:
state: present
account_id: act-1a9dd2b
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
tags:
- Environment: someEnvValue
- OtherTagKey: otherValue
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 5
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group-tal
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-8f4b8fe9
block_device_mappings:
- device_name: '/dev/sda1'
ebs:
volume_size: 100
volume_type: gp2
spot_instance_types:
- c3.large
do_not_update:
- image_id
wait_for_instances: True
wait_timeout: 600
register: result
- name: Store private ips to file
shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
with_items: "{{ result.instances }}"
- debug: var=result
# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
# In organizations with more than one account, it is required to specify an account_id
- hosts: localhost
tasks:
- name: create elastigroup
aws_elastigroup:
state: present
account_id: act-1a9dd2b
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
tags:
- Environment: someEnvValue
- OtherTagKey: otherValue
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 5
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group-tal
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-8f4b8fe9
block_device_mappings:
- device_name: '/dev/xvda'
ebs:
volume_size: 60
volume_type: gp2
- device_name: '/dev/xvdb'
ebs:
volume_size: 120
volume_type: gp2
spot_instance_types:
- c3.large
do_not_update:
- image_id
wait_for_instances: True
wait_timeout: 600
register: result
- name: Store private ips to file
shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
with_items: "{{ result.instances }}"
- debug: var=result
# In this example we have set up block device mapping with ephemeral devices
- hosts: localhost
tasks:
- name: create elastigroup
aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
block_device_mappings:
- device_name: '/dev/xvda'
virtual_name: ephemeral0
- device_name: '/dev/xvdb/'
virtual_name: ephemeral1
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example we create a basic group configuration with a network interface defined.
# Each network interface must have a device index
- hosts: localhost
tasks:
- name: create elastigroup
aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
network_interfaces:
- associate_public_ip_address: true
device_index: 0
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example we create a basic group configuration with a target tracking scaling policy defined
- hosts: localhost
tasks:
- name: create elastigroup
aws_elastigroup:
account_id: act-92d45673
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-79da021e
image_id: ami-f173cc91
fallback_to_od: true
tags:
- Creator: ValueOfCreatorTag
- Environment: ValueOfEnvironmentTag
key_pair: spotinst-labs-oregon
max_size: 10
min_size: 0
target: 2
unit: instance
monitoring: True
name: ansible-group-1
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-46cdc13d
spot_instance_types:
- c3.large
target_tracking_policies:
- policy_name: target-tracking-1
namespace: AWS/EC2
metric_name: CPUUtilization
statistic: average
unit: percent
target: 50
cooldown: 120
do_not_update:
- image_id
register: result
- debug: var=result
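# (Illustrative sketch) In this example we attach simple CloudWatch-based up and down scaling
# policies to the group. The metric values, thresholds, operator and action_type values below
# are placeholder assumptions; adjust them for your own account and workload.
- hosts: localhost
  tasks:
    - name: create elastigroup with scaling policies
      aws_elastigroup:
        state: present
        risk: 100
        availability_vs_cost: balanced
        availability_zones:
          - name: us-west-2a
            subnet_id: subnet-2b68a15c
        image_id: ami-f173cc91
        key_pair: spotinst-oregon
        max_size: 15
        min_size: 0
        target: 0
        unit: instance
        monitoring: True
        name: ansible-group
        on_demand_instance_type: c3.large
        product: Linux/UNIX
        security_group_ids:
          - sg-8f4b8fe9
        spot_instance_types:
          - c3.large
        up_scaling_policies:
          - policy_name: scale-up-on-cpu
            namespace: AWS/EC2
            metric_name: CPUUtilization
            statistic: average
            evaluation_periods: 5
            period: 300
            threshold: 80
            cooldown: 300
            unit: percent
            operator: gte
            action_type: adjustment
            adjustment: 1
        down_scaling_policies:
          - policy_name: scale-down-on-cpu
            namespace: AWS/EC2
            metric_name: CPUUtilization
            statistic: average
            evaluation_periods: 5
            period: 300
            threshold: 20
            cooldown: 300
            unit: percent
            operator: lte
            action_type: adjustment
            adjustment: 1
        do_not_update:
          - image_id
      register: result
    - debug: var=result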
'''
RETURN = '''
---
instances:
description: List of active elastigroup instances and their details.
returned: success
type: dict
sample: [
{
"spotInstanceRequestId": "sir-regs25zp",
"instanceId": "i-09640ad8678234c",
"instanceType": "m4.large",
"product": "Linux/UNIX",
"availabilityZone": "us-west-2b",
"privateIp": "180.0.2.244",
"createdAt": "2017-07-17T12:46:18.000Z",
"status": "fulfilled"
}
]
group_id:
description: Created / Updated group's ID.
returned: success
type: str
sample: "sig-12345"
'''
HAS_SPOTINST_SDK = False
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
import spotinst_sdk2 as spotinst
from spotinst_sdk2 import SpotinstSession
from spotinst_sdk2.client import SpotinstClientException
HAS_SPOTINST_SDK = True
except ImportError:
pass
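# Each of the *_fields tuples below maps Ansible module parameters onto attributes of the
# corresponding spotinst_sdk2 model class (see expand_fields / expand_list further down).
# A plain string entry is copied one-to-one; a dict entry of the form
# dict(ansible_field_name=..., spotinst_field_name=...) renames the Ansible parameter to a
# differently named SDK attribute.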
eni_fields = ('description',
'device_index',
'secondary_private_ip_address_count',
'associate_public_ip_address',
'delete_on_termination',
'groups',
'network_interface_id',
'private_ip_address',
'subnet_id',
'associate_ipv6_address')
private_ip_fields = ('private_ip_address',
'primary')
capacity_fields = (dict(ansible_field_name='min_size',
spotinst_field_name='minimum'),
dict(ansible_field_name='max_size',
spotinst_field_name='maximum'),
'target',
'unit')
lspec_fields = ('user_data',
'key_pair',
'tenancy',
'shutdown_script',
'monitoring',
'ebs_optimized',
'image_id',
'health_check_type',
'health_check_grace_period',
'health_check_unhealthy_duration_before_replacement',
'security_group_ids')
iam_fields = (dict(ansible_field_name='iam_role_name',
spotinst_field_name='name'),
dict(ansible_field_name='iam_role_arn',
spotinst_field_name='arn'))
scheduled_task_fields = ('adjustment',
'adjustment_percentage',
'batch_size_percentage',
'cron_expression',
'frequency',
'grace_period',
'task_type',
'is_enabled',
'scale_target_capacity',
'scale_min_capacity',
'scale_max_capacity')
scaling_policy_fields = ('policy_name',
'namespace',
'metric_name',
'dimensions',
'statistic',
'evaluation_periods',
'period',
'threshold',
'cooldown',
'unit',
'operator',
'shouldResumeStateful')
tracking_policy_fields = ('policy_name',
'namespace',
'source',
'metric_name',
'statistic',
'unit',
'cooldown',
'target',
'threshold')
action_fields = (dict(ansible_field_name='action_type',
spotinst_field_name='type'),
'adjustment',
'min_target_capacity',
'max_target_capacity',
'target',
'minimum',
'maximum')
signal_fields = ('name',
'timeout')
multai_lb_fields = ('balancer_id',
'project_id',
'target_set_id',
'az_awareness',
'auto_weight')
persistence_fields = ('should_persist_root_device',
'should_persist_block_devices',
'should_persist_private_ip',
'block_devices_mode')
revert_to_spot_fields = ('perform_at',
'time_windows')
elastic_beanstalk_platform_update_fields = ('perform_at',
'time_window',
'update_level')
elastic_beanstalk_managed_actions_fields = ('platform_update',)
strategy_fields = ('risk',
'utilize_reserved_instances',
'fallback_to_od',
'on_demand_count',
'availability_vs_cost',
'draining_timeout',
'spin_up_time',
'lifetime_period',
'revert_to_spot')
ebs_fields = ('delete_on_termination',
'encrypted',
'iops',
'snapshot_id',
'volume_type',
'volume_size')
bdm_fields = ('device_name',
'virtual_name',
'no_device')
kubernetes_fields = ('api_server',
'token',
'integration_mode',
'cluster_identifier')
kubernetes_auto_scale_fields = ('is_enabled', 'is_auto_config', 'cooldown')
kubernetes_headroom_fields = (
'cpu_per_unit',
'memory_per_unit',
'num_of_units')
kubernetes_labels_fields = ('key', 'value')
kubernetes_down_fields = ('evaluation_periods',)
nomad_fields = ('master_host', 'master_port', 'acl_token')
nomad_auto_scale_fields = ('is_enabled', 'is_auto_config', 'cooldown')
nomad_headroom_fields = ('cpu_per_unit', 'memory_per_unit', 'num_of_units')
nomad_constraints_fields = ('key', 'value')
nomad_down_fields = ('evaluation_periods',)
docker_swarm_fields = ('master_host', 'master_port')
docker_swarm_auto_scale_fields = ('is_enabled', 'cooldown')
docker_swarm_headroom_fields = (
'cpu_per_unit',
'memory_per_unit',
'num_of_units')
docker_swarm_down_fields = ('evaluation_periods',)
route53_domain_fields = ('hosted_zone_id',)
route53_record_set_fields = ('name', 'use_public_ip')
mlb_runtime_fields = ('deployment_id',)
mlb_load_balancers_fields = (
'type',
'target_set_id',
'balancer_id',
'auto_weight',
'az_awareness')
elastic_beanstalk_fields = ('environment_id',)
elastic_beanstalk_deployment_fields = ('automatic_roll',
'batch_size_percentage',
'grace_period')
elastic_beanstalk_strategy_fields = ('action', 'should_drain_instances')
stateful_deallocation_fields = (
dict(
ansible_field_name='stateful_deallocation_should_delete_images',
spotinst_field_name='should_delete_images'),
dict(
ansible_field_name='stateful_deallocation_should_delete_snapshots',
spotinst_field_name='should_delete_snapshots'),
dict(
ansible_field_name='stateful_deallocation_should_delete_network_interfaces',
spotinst_field_name='should_delete_network_interfaces'),
dict(
ansible_field_name='stateful_deallocation_should_delete_volumes',
spotinst_field_name='should_delete_volumes'))
code_deploy_fields = ('clean_up_on_failure', 'terminate_instance_on_failure')
code_deploy_deployment_fields = ('application_name', 'deployment_group_name')
right_scale_fields = ('account_id',
'refresh_token')
rancher_fields = ('access_key',
'secret_key',
'master_host',
'version')
chef_fields = ('chef_server',
'organization',
'user',
'pem_key',
'chef_version')
az_fields = ('name',
'subnet_id',
'subnet_ids',
'placement_group_name')
opsworks_fields = ('layer_id',)
scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
mesosphere_fields = ('api_server',)
ecs_fields = ('cluster_name',)
ecs_auto_scale_fields = ('is_enabled', 'is_auto_config', 'cooldown')
ecs_headroom_fields = ('cpu_per_unit', 'memory_per_unit', 'num_of_units')
ecs_attributes_fields = ('key', 'value')
ecs_down_fields = ('evaluation_periods',)
multai_fields = ('multai_token',)
def handle_elastigroup(client, module):
has_changed = False
should_create = False
group_id = None
message = 'None'
name = module.params.get('name')
state = module.params.get('state')
uniqueness_by = module.params.get('uniqueness_by')
external_group_id = module.params.get('id')
if uniqueness_by == 'id':
if external_group_id is None:
should_create = True
else:
should_create = False
group_id = external_group_id
else:
groups = client.get_elastigroups()
should_create, group_id = find_group_with_same_name(groups, name)
if should_create is True:
if state == 'present':
eg = expand_elastigroup(module, is_update=False)
module.debug(str(" [INFO] " + message + "\n"))
group = client.create_elastigroup(group=eg)
group_id = group['id']
            message = 'Created group successfully.'
has_changed = True
elif state == 'absent':
message = 'Cannot delete non-existent group.'
has_changed = False
else:
eg = expand_elastigroup(module, is_update=True)
auto_apply_tags = module.params.get('auto_apply_tags')
if state == 'present':
group = client.update_elastigroup(group_update=eg, group_id=group_id, auto_apply_tags=auto_apply_tags)
message = 'Updated group successfully.'
try:
roll_config = module.params.get('roll_config')
if roll_config:
eg_roll = spotinst.models.elastigroup.aws.Roll(
batch_size_percentage=roll_config.get('batch_size_percentage'),
grace_period=roll_config.get('grace_period'),
health_check_type=roll_config.get('health_check_type')
)
roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
message = 'Updated and started rolling the group successfully.'
except SpotinstClientException as exc:
message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc)
has_changed = True
elif state == 'absent':
try:
stfl_dealloc_request = expand_fields(
stateful_deallocation_fields,
module.params, 'StatefulDeallocation')
if stfl_dealloc_request. \
should_delete_network_interfaces is True or \
stfl_dealloc_request.should_delete_images is True or \
stfl_dealloc_request.should_delete_volumes is True or \
stfl_dealloc_request.should_delete_snapshots is True:
client.delete_elastigroup_with_deallocation(
group_id=group_id,
stateful_deallocation=stfl_dealloc_request)
else:
client.delete_elastigroup(group_id=group_id)
except SpotinstClientException as exc:
if "GROUP_DOESNT_EXIST" in exc.message:
pass
else:
module.fail_json(
msg="Error while attempting to delete group :"
" " + exc.message)
message = 'Deleted group successfully.'
has_changed = True
return group_id, message, has_changed
def retrieve_group_instances(client, module, group_id):
wait_timeout = module.params.get('wait_timeout')
wait_for_instances = module.params.get('wait_for_instances')
health_check_type = module.params.get('health_check_type')
if wait_timeout is None:
wait_timeout = 300
wait_timeout = time.time() + wait_timeout
target = module.params.get('target')
state = module.params.get('state')
instances = list()
if state == 'present' and group_id is not None and wait_for_instances is True:
is_amount_fulfilled = False
while is_amount_fulfilled is False and wait_timeout > time.time():
instances = list()
amount_of_fulfilled_instances = 0
if health_check_type is not None:
healthy_instances = client.get_instance_healthiness(group_id=group_id)
for healthy_instance in healthy_instances:
                    if healthy_instance.get('healthStatus') == 'HEALTHY':
amount_of_fulfilled_instances += 1
instances.append(healthy_instance)
else:
active_instances = client.get_elastigroup_active_instances(group_id=group_id)
for active_instance in active_instances:
if active_instance.get('private_ip') is not None:
amount_of_fulfilled_instances += 1
instances.append(active_instance)
if amount_of_fulfilled_instances >= target:
is_amount_fulfilled = True
time.sleep(10)
return instances
def find_group_with_same_name(groups, name):
for group in groups:
if group['name'] == name:
return False, group.get('id')
return True, None
def expand_elastigroup(module, is_update):
do_not_update = module.params.get('do_not_update') or []
name = module.params.get('name')
eg = spotinst.models.elastigroup.aws.Elastigroup()
description = module.params.get('description')
if name is not None:
eg.name = name
if description is not None:
eg.description = description
# Capacity
expand_capacity(eg, module, is_update, do_not_update)
# Strategy
expand_strategy(eg, module)
# Scaling
expand_scaling(eg, module)
# Third party integrations
expand_integrations(eg, module)
# Compute
expand_compute(eg, module, is_update, do_not_update)
# Multai
expand_multai(eg, module)
# Scheduling
expand_scheduled_tasks(eg, module)
return eg
def expand_compute(eg, module, is_update, do_not_update):
elastic_ips = module.params.get('elastic_ips')
on_demand_instance_type = module.params.get('on_demand_instance_type')
spot_instance_types = module.params.get('spot_instance_types')
ebs_volume_pool = module.params.get('ebs_volume_pool')
availability_zones_list = module.params.get('availability_zones')
private_ips = module.params.get('private_ips')
product = module.params.get('product')
preferred_spot_instance_types = module.params.get(
'preferred_spot_instance_types')
eg_compute = spotinst.models.elastigroup.aws.Compute()
if product is not None:
# Only put product on group creation
if is_update is not True:
eg_compute.product = product
if elastic_ips is not None:
eg_compute.elastic_ips = elastic_ips
if private_ips:
eg_compute.private_ips = private_ips
if on_demand_instance_type is not None or spot_instance_types is not None or preferred_spot_instance_types is not None:
eg_instance_types = spotinst.models.elastigroup.aws.InstanceTypes()
        if on_demand_instance_type is not None:
            eg_instance_types.ondemand = on_demand_instance_type
        if spot_instance_types is not None:
            eg_instance_types.spot = spot_instance_types
if preferred_spot_instance_types is not None:
eg_instance_types.preferred_spot = preferred_spot_instance_types
if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
eg_compute.instance_types = eg_instance_types
expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
expand_launch_spec(eg_compute, module, is_update, do_not_update)
eg.compute = eg_compute
def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
if ebs_volumes_list is not None:
eg_volumes = []
for volume in ebs_volumes_list:
eg_volume = spotinst.models.elastigroup.aws.EbsVolume()
if volume.get('device_name') is not None:
eg_volume.device_name = volume.get('device_name')
if volume.get('volume_ids') is not None:
eg_volume.volume_ids = volume.get('volume_ids')
if eg_volume.device_name is not None:
eg_volumes.append(eg_volume)
if len(eg_volumes) > 0:
eg_compute.ebs_volume_pool = eg_volumes
def expand_launch_spec(eg_compute, module, is_update, do_not_update):
eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
if module.params.get('iam_role_arn') is not None or module.params.get('iam_role_name') is not None:
eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
tags = module.params.get('tags')
load_balancers = module.params.get('load_balancers')
mlb_load_balancers = module.params.get('mlb_load_balancers')
target_group_arns = module.params.get('target_group_arns')
block_device_mappings = module.params.get('block_device_mappings')
network_interfaces = module.params.get('network_interfaces')
credit_specification = module.params.get('credit_specification')
if is_update is True:
if 'image_id' in do_not_update:
delattr(eg_launch_spec, 'image_id')
expand_tags(eg_launch_spec, tags)
expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns, mlb_load_balancers)
expand_block_device_mappings(eg_launch_spec, block_device_mappings)
expand_network_interfaces(eg_launch_spec, network_interfaces)
expand_credit_specification(eg_launch_spec, credit_specification)
eg_compute.launch_specification = eg_launch_spec
def expand_credit_specification(eg_launch_spec, credit_specification):
eg_credit_specification = None
if credit_specification is not None:
eg_credit_specification = spotinst.models.elastigroup.aws.CreditSpecification()
cpu_credits = credit_specification.get('cpu_credits')
if cpu_credits is not None:
eg_credit_specification.cpu_credits = cpu_credits
eg_launch_spec.credit_specification = eg_credit_specification
def expand_integrations(eg, module):
rancher = module.params.get('rancher')
mesosphere = module.params.get('mesosphere')
ecs = module.params.get('ecs')
kubernetes = module.params.get('kubernetes')
nomad = module.params.get('nomad')
docker_swarm = module.params.get('docker_swarm')
route53 = module.params.get('route53')
right_scale = module.params.get('right_scale')
opsworks = module.params.get('opsworks')
chef = module.params.get('chef')
mlb_runtime = module.params.get('mlb_runtime')
elastic_beanstalk = module.params.get('elastic_beanstalk')
code_deploy = module.params.get('code_deploy')
integration_exists = False
eg_integrations = spotinst.models.elastigroup.aws.ThirdPartyIntegrations()
if mesosphere is not None:
eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
integration_exists = True
if ecs is not None:
expand_ecs(eg_integrations, ecs)
integration_exists = True
if kubernetes is not None:
expand_kubernetes(eg_integrations, kubernetes)
integration_exists = True
if nomad is not None:
expand_nomad(eg_integrations, nomad)
integration_exists = True
if docker_swarm is not None:
expand_docker_swarm(eg_integrations, docker_swarm)
integration_exists = True
if route53 is not None:
expand_route53(eg_integrations, route53)
integration_exists = True
if mlb_runtime is not None:
eg_integrations.mlb_runtime = expand_fields(
mlb_runtime_fields, mlb_runtime, 'MlbRuntimeConfiguration')
integration_exists = True
if elastic_beanstalk:
expand_elastic_beanstalk(eg_integrations, elastic_beanstalk)
integration_exists = True
if code_deploy is not None:
expand_code_deploy(eg_integrations, code_deploy)
integration_exists = True
if right_scale is not None:
eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
integration_exists = True
if opsworks is not None:
eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
integration_exists = True
if rancher is not None:
eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
integration_exists = True
if chef is not None:
eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
integration_exists = True
if integration_exists:
eg.third_parties_integration = eg_integrations
def expand_ecs(eg_integrations, ecs_config):
ecs = expand_fields(ecs_fields, ecs_config, 'EcsConfiguration')
ecs_auto_scale_config = ecs_config.get('auto_scale', None)
if ecs_auto_scale_config:
ecs.auto_scale = expand_fields(
ecs_auto_scale_fields,
ecs_auto_scale_config,
'EcsAutoScaleConfiguration')
ecs_headroom_config = ecs_auto_scale_config.get('headroom', None)
if ecs_headroom_config:
ecs.auto_scale.headroom = expand_fields(
ecs_headroom_fields,
ecs_headroom_config,
'EcsAutoScalerHeadroomConfiguration')
ecs_attributes_config = ecs_auto_scale_config.get('attributes', None)
if ecs_attributes_config:
ecs.auto_scale.attributes = expand_list(
ecs_attributes_config,
ecs_attributes_fields,
'EcsAutoScalerAttributeConfiguration')
ecs_down_config = ecs_auto_scale_config.get('down', None)
if ecs_down_config:
ecs.auto_scale.down = expand_fields(
ecs_down_fields, ecs_down_config,
'EcsAutoScalerDownConfiguration')
eg_integrations.ecs = ecs
def expand_nomad(eg_integrations, nomad_config):
nomad = expand_fields(nomad_fields, nomad_config, 'NomadConfiguration')
nomad_auto_scale_config = nomad_config.get('auto_scale', None)
if nomad_auto_scale_config:
nomad.auto_scale = expand_fields(
nomad_auto_scale_fields,
nomad_auto_scale_config,
'NomadAutoScalerConfiguration')
nomad_headroom_config = nomad_auto_scale_config.get('headroom', None)
if nomad_headroom_config:
nomad.auto_scale.headroom = expand_fields(
nomad_headroom_fields,
nomad_headroom_config,
'NomadAutoScalerHeadroomConfiguration')
nomad_constraints_config = nomad_auto_scale_config.get(
'constraints', None)
if nomad_constraints_config:
nomad.auto_scale.constraints = expand_list(
nomad_constraints_config,
nomad_constraints_fields,
'NomadAutoScalerConstraintsConfiguration')
nomad_down_config = nomad_auto_scale_config.get('down', None)
if nomad_down_config:
nomad.auto_scale.down = expand_fields(
nomad_down_fields,
nomad_down_config,
'NomadAutoScalerDownConfiguration')
eg_integrations.nomad = nomad
def expand_code_deploy(eg_integrations, code_deploy_config):
code_deploy = expand_fields(
code_deploy_fields, code_deploy_config, 'CodeDeployConfiguration')
code_deploy_deployment_config = code_deploy_config.get(
'deployment_groups', None)
if code_deploy_deployment_config:
code_deploy.deployment_groups = expand_list(
code_deploy_deployment_config, code_deploy_deployment_fields,
'CodeDeployDeploymentGroupsConfiguration')
eg_integrations.code_deploy = code_deploy
def expand_docker_swarm(eg_integrations, docker_swarm_config):
docker_swarm = expand_fields(
docker_swarm_fields,
docker_swarm_config,
'DockerSwarmConfiguration')
docker_swarm_auto_scale_config = docker_swarm_config.get(
'auto_scale', None)
if docker_swarm_auto_scale_config:
docker_swarm.auto_scale = expand_fields(
docker_swarm_auto_scale_fields,
docker_swarm_auto_scale_config,
'DockerSwarmAutoScalerConfiguration')
docker_swarm_headroom_config = docker_swarm_auto_scale_config.get(
'headroom', None)
if docker_swarm_headroom_config:
docker_swarm.auto_scale.headroom = expand_fields(
docker_swarm_headroom_fields,
docker_swarm_headroom_config,
'DockerSwarmAutoScalerHeadroomConfiguration')
docker_swarm_down_config = docker_swarm_auto_scale_config.get(
'down', None)
if docker_swarm_down_config:
docker_swarm.auto_scale.down = expand_fields(
docker_swarm_down_fields,
docker_swarm_down_config,
'DockerSwarmAutoScalerDownConfiguration')
eg_integrations.docker_swarm = docker_swarm
def expand_route53(eg_integrations, route53_config):
route53 = spotinst.models.elastigroup.aws.Route53Configuration()
domains_configuration = route53_config.get('domains', None)
if domains_configuration:
route53.domains = expand_list(
domains_configuration,
route53_domain_fields,
'Route53DomainsConfiguration')
for i in range(len(route53.domains)):
expanded_domain = route53.domains[i]
raw_domain = domains_configuration[i]
expanded_domain.record_sets = expand_list(
raw_domain['record_sets'],
route53_record_set_fields,
'Route53RecordSetsConfiguration')
eg_integrations.route53 = route53
def expand_elastic_beanstalk(eg_integrations, elastic_beanstalk_config):
elastic_beanstalk = expand_fields(
elastic_beanstalk_fields, elastic_beanstalk_config, 'ElasticBeanstalk')
elastic_beanstalk_deployment = elastic_beanstalk_config.get(
'deployment_preferences', None)
elastic_beanstalk_managed_actions = elastic_beanstalk_config.get(
'managed_actions', None)
if elastic_beanstalk_deployment:
elastic_beanstalk.deployment_preferences = expand_fields(
elastic_beanstalk_deployment_fields, elastic_beanstalk_deployment,
'DeploymentPreferences')
if elastic_beanstalk.deployment_preferences and elastic_beanstalk_deployment.get('strategy'):
elastic_beanstalk.deployment_preferences.strategy = \
expand_fields(elastic_beanstalk_strategy_fields,
elastic_beanstalk_deployment['strategy'],
'BeanstalkDeploymentStrategy')
if elastic_beanstalk_managed_actions:
elastic_beanstalk.managed_actions = expand_fields(
elastic_beanstalk_managed_actions_fields, elastic_beanstalk_managed_actions,
'ManagedActions')
if elastic_beanstalk.managed_actions:
elastic_beanstalk.managed_actions.platform_update = expand_fields(
elastic_beanstalk_platform_update_fields, elastic_beanstalk_managed_actions['platform_update'],
'PlatformUpdate')
eg_integrations.elastic_beanstalk = elastic_beanstalk
def expand_kubernetes(eg_integrations, kubernetes_config):
kubernetes = expand_fields(
kubernetes_fields,
kubernetes_config,
'KubernetesConfiguration')
kubernetes_auto_scale_config = kubernetes_config.get('auto_scale', None)
if kubernetes_auto_scale_config:
kubernetes.auto_scale = expand_fields(
kubernetes_auto_scale_fields,
kubernetes_auto_scale_config,
'KubernetesAutoScalerConfiguration')
        kubernetes_headroom_config = kubernetes_auto_scale_config.get(
            'headroom', None)
if kubernetes_headroom_config:
kubernetes.auto_scale.headroom = expand_fields(
kubernetes_headroom_fields,
kubernetes_headroom_config,
'KubernetesAutoScalerHeadroomConfiguration')
kubernetes_labels_config = kubernetes_auto_scale_config.get(
'labels', None)
if kubernetes_labels_config:
kubernetes.auto_scale.labels = expand_list(
kubernetes_labels_config,
kubernetes_labels_fields,
'KubernetesAutoScalerLabelsConfiguration')
kubernetes_down_config = kubernetes_auto_scale_config.get('down', None)
if kubernetes_down_config:
kubernetes.auto_scale.down = expand_fields(
kubernetes_down_fields,
kubernetes_down_config,
'KubernetesAutoScalerDownConfiguration')
eg_integrations.kubernetes = kubernetes
def expand_capacity(eg, module, is_update, do_not_update):
eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
if is_update is True:
delattr(eg_capacity, 'unit')
if 'target' in do_not_update:
delattr(eg_capacity, 'target')
eg.capacity = eg_capacity
def expand_strategy(eg, module):
persistence = module.params.get('persistence')
signals = module.params.get('signals')
revert_to_spot = module.params.get('revert_to_spot')
eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
if terminate_at_end_of_billing_hour is not None:
eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, module.params, 'ScalingStrategy')
if persistence is not None:
eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
if signals is not None:
eg_signals = expand_list(signals, signal_fields, 'Signal')
if len(eg_signals) > 0:
eg_strategy.signals = eg_signals
if revert_to_spot is not None:
eg_strategy.revert_to_spot = expand_fields(revert_to_spot_fields, revert_to_spot, "RevertToSpot")
eg.strategy = eg_strategy
def expand_multai(eg, module):
multai_load_balancers = module.params.get('multai_load_balancers')
eg_multai = expand_fields(multai_fields, module.params, 'Multai')
if multai_load_balancers is not None:
eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
if len(eg_multai_load_balancers) > 0:
eg_multai.balancers = eg_multai_load_balancers
eg.multai = eg_multai
def expand_scheduled_tasks(eg, module):
scheduled_tasks = module.params.get('scheduled_tasks')
if scheduled_tasks is not None:
eg_scheduling = spotinst.models.elastigroup.aws.Scheduling()
eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
if len(eg_tasks) > 0:
eg_scheduling.tasks = eg_tasks
eg.scheduling = eg_scheduling
def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns, mlb_load_balancers):
if load_balancers is not None or target_group_arns is not None:
eg_load_balancers_config = spotinst.models.elastigroup.aws.LoadBalancersConfig()
eg_total_lbs = []
if load_balancers is not None:
for elb_name in load_balancers:
eg_elb = spotinst.models.elastigroup.aws.LoadBalancer()
if elb_name is not None:
eg_elb.name = elb_name
eg_elb.type = 'CLASSIC'
eg_total_lbs.append(eg_elb)
if target_group_arns is not None:
for target_arn in target_group_arns:
eg_elb = spotinst.models.elastigroup.aws.LoadBalancer()
if target_arn is not None:
eg_elb.arn = target_arn
eg_elb.type = 'TARGET_GROUP'
eg_total_lbs.append(eg_elb)
if mlb_load_balancers:
mlbs = expand_list(
mlb_load_balancers,
mlb_load_balancers_fields,
'LoadBalancer')
for mlb in mlbs:
mlb.type = "MULTAI_TARGET_SET"
eg_total_lbs.extend(mlbs)
if len(eg_total_lbs) > 0:
eg_load_balancers_config.load_balancers = eg_total_lbs
eg_launchspec.load_balancers_config = eg_load_balancers_config
def expand_tags(eg_launchspec, tags):
if tags is not None:
eg_tags = []
for tag in tags:
eg_tag = spotinst.models.elastigroup.aws.Tag()
if list(tag):
eg_tag.tag_key = list(tag)[0]
if tag[list(tag)[0]]:
eg_tag.tag_value = tag[list(tag)[0]]
eg_tags.append(eg_tag)
if len(eg_tags) > 0:
eg_launchspec.tags = eg_tags
def expand_block_device_mappings(eg_launchspec, bdms):
if bdms is not None:
eg_bdms = []
for bdm in bdms:
eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
if bdm.get('ebs') is not None:
eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
eg_bdms.append(eg_bdm)
if len(eg_bdms) > 0:
eg_launchspec.block_device_mappings = eg_bdms
def expand_network_interfaces(eg_launchspec, enis):
if enis is not None:
eg_enis = []
for eni in enis:
eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
if eg_pias is not None:
eg_eni.private_ip_addresses = eg_pias
eg_enis.append(eg_eni)
if len(eg_enis) > 0:
eg_launchspec.network_interfaces = eg_enis
def expand_scaling(eg, module):
up_scaling_policies = module.params.get('up_scaling_policies')
down_scaling_policies = module.params.get('down_scaling_policies')
target_tracking_policies = module.params.get('target_tracking_policies')
eg_scaling = spotinst.models.elastigroup.aws.Scaling()
if up_scaling_policies is not None:
eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
if len(eg_up_scaling_policies) > 0:
eg_scaling.up = eg_up_scaling_policies
if down_scaling_policies is not None:
eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
if len(eg_down_scaling_policies) > 0:
eg_scaling.down = eg_down_scaling_policies
if target_tracking_policies is not None:
eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
if len(eg_target_tracking_policies) > 0:
eg_scaling.target = eg_target_tracking_policies
if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
eg.scaling = eg_scaling
def expand_list(items, fields, class_name):
if items is not None:
new_objects_list = []
for item in items:
new_obj = expand_fields(fields, item, class_name)
new_objects_list.append(new_obj)
return new_objects_list
def expand_fields(fields, item, class_name):
class_ = getattr(spotinst.models.elastigroup.aws, class_name)
new_obj = class_()
# Handle primitive fields
if item is not None:
for field in fields:
if isinstance(field, dict):
ansible_field_name = field['ansible_field_name']
spotinst_field_name = field['spotinst_field_name']
else:
ansible_field_name = field
spotinst_field_name = field
if item.get(ansible_field_name) is not None:
setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
return new_obj
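# Example of the mapping performed above (illustrative values only):
#   expand_fields(('risk', 'fallback_to_od'),
#                 {'risk': 100, 'fallback_to_od': True},
#                 'Strategy')
# returns a spotinst.models.elastigroup.aws.Strategy object with .risk == 100 and
# .fallback_to_od == True. A dict field such as
#   dict(ansible_field_name='max_size', spotinst_field_name='maximum')
# copies the Ansible value onto the renamed SDK attribute ('maximum') instead.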
def expand_scaling_policies(scaling_policies):
eg_scaling_policies = []
for policy in scaling_policies:
eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
eg_scaling_policies.append(eg_policy)
return eg_scaling_policies
def expand_target_tracking_policies(tracking_policies):
eg_tracking_policies = []
for policy in tracking_policies:
eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
eg_tracking_policies.append(eg_policy)
return eg_tracking_policies
def get_client(module):
# Retrieve creds file variables
creds_file_loaded_vars = dict()
credentials_path = module.params.get('credentials_path')
if credentials_path is not None:
try:
with open(credentials_path, "r") as creds:
for line in creds:
eq_index = line.find(':')
var_name = line[:eq_index].strip()
string_value = line[eq_index + 1:].strip()
creds_file_loaded_vars[var_name] = string_value
except IOError:
pass
# End of creds file retrieval
token = module.params.get('token')
if not token:
token = creds_file_loaded_vars.get("token")
account = module.params.get('account_id')
if not account:
account = creds_file_loaded_vars.get("account")
if account is not None:
session = spotinst.SpotinstSession(auth_token=token, account_id=account)
else:
session = spotinst.SpotinstSession(auth_token=token)
client = session.client("elastigroup_aws")
return client
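# Note on the credentials file parsed above: it is read as simple "key: value" lines,
# for example (illustrative values):
#   token: <your-spotinst-api-token>
#   account: act-12345
# Values passed directly as module parameters (or via the SPOTINST_TOKEN /
# SPOTINST_ACCOUNT_ID environment variables) take precedence over the file.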
def main():
fields = dict(
account_id=dict(type='str', fallback=(env_fallback, ['SPOTINST_ACCOUNT_ID', 'ACCOUNT'])),
auto_apply_tags=dict(type='bool'),
availability_vs_cost=dict(type='str', required=True),
availability_zones=dict(type='list', required=True),
block_device_mappings=dict(type='list'),
chef=dict(type='dict'),
code_deploy=dict(type='dict'),
credentials_path=dict(type='path', default="~/.spotinst/credentials"),
credit_specification=dict(type='dict'),
do_not_update=dict(default=[], type='list'),
docker_swarm=dict(type='dict'),
down_scaling_policies=dict(type='list'),
draining_timeout=dict(type='int'),
ebs_optimized=dict(type='bool'),
ebs_volume_pool=dict(type='list'),
ecs=dict(type='dict'),
elastic_beanstalk=dict(type='dict'),
elastic_ips=dict(type='list'),
fallback_to_od=dict(type='bool'),
id=dict(type='str'),
health_check_grace_period=dict(type='int'),
health_check_type=dict(type='str'),
health_check_unhealthy_duration_before_replacement=dict(type='int'),
iam_role_arn=dict(type='str'),
iam_role_name=dict(type='str'),
image_id=dict(type='str', required=True),
key_pair=dict(type='str'),
kubernetes=dict(type='dict'),
lifetime_period=dict(type='int'),
load_balancers=dict(type='list'),
max_size=dict(type='int', required=True),
mesosphere=dict(type='dict'),
min_size=dict(type='int', required=True),
mlb_runtime=dict(type='dict'),
mlb_load_balancers=dict(type='list'),
monitoring=dict(type='str'),
multai_load_balancers=dict(type='list'),
multai_token=dict(type='str'),
name=dict(type='str', required=True),
network_interfaces=dict(type='list'),
nomad=dict(type='dict'),
on_demand_count=dict(type='int'),
on_demand_instance_type=dict(type='str'),
opsworks=dict(type='dict'),
persistence=dict(type='dict'),
preferred_spot_instance_types=dict(type='list'),
private_ips=dict(type='list'),
product=dict(type='str', required=True),
rancher=dict(type='dict'),
revert_to_spot=dict(type='dict'),
right_scale=dict(type='dict'),
risk=dict(type='int'),
roll_config=dict(type='dict'),
route53=dict(type='dict'),
scheduled_tasks=dict(type='list'),
security_group_ids=dict(type='list', required=True),
shutdown_script=dict(type='str'),
signals=dict(type='list'),
spin_up_time=dict(type='int'),
spot_instance_types=dict(type='list', required=True),
state=dict(default='present', choices=['present', 'absent']),
stateful_deallocation_should_delete_images=dict(type='bool'),
stateful_deallocation_should_delete_network_interfaces=dict(type='bool'),
stateful_deallocation_should_delete_snapshots=dict(type='bool'),
stateful_deallocation_should_delete_volumes=dict(type='bool'),
tags=dict(type='list'),
target=dict(type='int', required=True),
target_group_arns=dict(type='list'),
tenancy=dict(type='str'),
terminate_at_end_of_billing_hour=dict(type='bool'),
token=dict(type='str', fallback=(env_fallback, ['SPOTINST_TOKEN'])),
unit=dict(type='str'),
user_data=dict(type='str'),
utilize_reserved_instances=dict(type='bool'),
uniqueness_by=dict(default='name', choices=['name', 'id']),
up_scaling_policies=dict(type='list'),
target_tracking_policies=dict(type='list'),
wait_for_instances=dict(type='bool', default=False),
wait_timeout=dict(type='int')
)
module = AnsibleModule(argument_spec=fields)
if not HAS_SPOTINST_SDK:
module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk2)")
client = get_client(module=module)
group_id, message, has_changed = handle_elastigroup(client=client, module=module)
instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
if __name__ == '__main__':
main()
| 32.533181
| 147
| 0.642358
|
53d3d18bf6903dce8a8def495382774bcc2d4c65
| 2,478
|
py
|
Python
|
src/gedml/core/collectors/base_collector.py
|
zbr17/GeDML
|
83f6f835e7c3319e32432b3013820f98476cc96c
|
[
"MIT"
] | 25
|
2021-09-06T13:26:02.000Z
|
2022-01-06T13:25:24.000Z
|
src/gedml/core/collectors/base_collector.py
|
zbr17/GeDML
|
83f6f835e7c3319e32432b3013820f98476cc96c
|
[
"MIT"
] | 1
|
2021-09-09T08:29:29.000Z
|
2021-09-13T15:05:59.000Z
|
src/gedml/core/collectors/base_collector.py
|
zbr17/GeDML
|
83f6f835e7c3319e32432b3013820f98476cc96c
|
[
"MIT"
] | 2
|
2021-09-07T08:44:41.000Z
|
2021-09-09T08:31:55.000Z
|
import torch
from abc import ABCMeta, abstractmethod
from ..modules import WithRecorder
class BaseCollector(WithRecorder, metaclass=ABCMeta):
"""
    Base class of the collector module, which defines the main collector methods ``collect`` and ``update``, and the default parameters in ``output_list``, ``input_list`` and ``_default_next_module``.
Args:
metric (metric instance):
metric to compute matrix (e.g. euclidean or cosine)
Example:
>>> metric = MetricFactory(is_normalize=True, metric_name="cosine")
>>> data = torch.randn(10, 3, 227, 227)
>>> embeddings = torch.randn(10, 128)
>>> labels = torch.randint(0, 3, size=(10,))
>>> collector = DefaultCollector(metric=metric)
>>> # collector forward
>>> output_dict = collector(data, embeddings, labels)
"""
def __init__(self, metric, **kwargs):
super().__init__(**kwargs)
self.metric = metric
def forward(
self,
embeddings,
labels
) -> tuple:
"""
        In the ``collect`` function, three kinds of operations may be performed:
1. maintain sets of parameters about collecting (or synthesizing) samples
2. compute metric matrix and pass to next module
3. compute some regularization term using embeddings
Args:
embeddings (torch.Tensor):
Embedding. size: :math:`B \\times dim`
            labels (torch.Tensor):
Ground truth of dataset. size: :math:`B \\times 1`
Returns:
tuple: include metric matrix, labels etc according to function ``output_list``.
Let :math:`B_{row}` be the length of rows and :math:`B_{col}` be the length of columns, typical output type is listed below:
1. metric matrix (torch.Tensor): size: :math:`B_{row} \\times B_{col}`
2. labels of rows (torch.Tensor): size: :math:`B_{row} \\times 1` or :math:`B_{row} \\times B_{col}`
3. labels of columns (torch.Tensor): size: :math:`1 \\times B_{col}` or :math:`B_{row} \\times B_{col}`
4. is_from_same_source (bool): indicate whether row vectors and column vectors are from the same data
"""
metric_mat, labels, proxies_labels, is_same_source = None, None, None, False
return (
metric_mat,
labels,
proxies_labels,
is_same_source
)
| 39.333333
| 216
| 0.604923
|
c8c77525d0e786e301d5336063523930dca62dc4
| 3,710
|
py
|
Python
|
broker/oanda/position.py
|
lorne-luo/qsforex
|
87976aef91a3f4dd4ef1d3de17db1b014b46e4d8
|
[
"Unlicense",
"MIT"
] | null | null | null |
broker/oanda/position.py
|
lorne-luo/qsforex
|
87976aef91a3f4dd4ef1d3de17db1b014b46e4d8
|
[
"Unlicense",
"MIT"
] | 7
|
2020-02-11T23:56:08.000Z
|
2022-02-10T07:35:07.000Z
|
broker/oanda/position.py
|
lorne-luo/qsforex
|
87976aef91a3f4dd4ef1d3de17db1b014b46e4d8
|
[
"Unlicense",
"MIT"
] | null | null | null |
import logging
from broker.base import PositionBase
from broker.oanda.base import OANDABase
from broker.oanda.common.convertor import get_symbol
from broker.oanda.common.logger import log_error
logger = logging.getLogger(__name__)
class PositionMixin(OANDABase, PositionBase):
def pull_position(self, instrument):
"""pull position by instrument"""
instrument = get_symbol(instrument)
response = self.api.position.get(self.account_id, instrument)
if response.status < 200 or response.status > 299:
            log_error(logger, response, 'QUERY_POSITION')
raise Exception(response.body['errorMessage'])
last_transaction_id = response.get('lastTransactionID', 200)
position = response.get('position', 200)
if position:
self.positions[position.instrument] = position
return position
def list_all_positions(self):
response = self.api.position.list(
self.account_id,
)
if response.status < 200 or response.status > 299:
log_error(logger, response, 'LIST_ALL_POSITION')
raise Exception(response.body['errorMessage'])
last_transaction_id = response.get('lastTransactionID', 200)
positions = response.get('positions', 200)
for position in positions:
self.positions[position.instrument] = position
return positions
def list_open_positions(self):
response = self.api.position.list_open(
self.account_id,
)
if response.status < 200 or response.status > 299:
log_error(logger, response, 'LIST_OPEN_POSITION')
raise Exception(response.body['errorMessage'])
last_transaction_id = response.get('lastTransactionID', 200)
positions = response.get('positions', 200)
for position in positions:
self.positions[position.instrument] = position
return positions
def close_all_position(self):
instruments = self.positions.keys()
logger.error('[CLOSE_ALL_POSITIONS] Start.')
for i in instruments:
self.close_position(i, 'ALL', 'ALL')
def close_position(self, instrument, longUnits='ALL', shortUnits='ALL'):
instrument = get_symbol(instrument)
response = self.api.position.close(
self.account_id,
instrument,
longUnits=longUnits,
shortUnits=shortUnits
)
if response.status < 200 or response.status > 299:
log_error(logger, response, 'CLOSE_POSITION')
raise Exception(response.body['errorMessage'])
longOrderCreateTransaction = response.get('longOrderCreateTransaction', None)
longOrderFillTransaction = response.get('longOrderFillTransaction', None)
longOrderCancelTransaction = response.get('longOrderCancelTransaction', None)
shortOrderCreateTransaction = response.get('shortOrderCreateTransaction', None)
shortOrderFillTransaction = response.get('shortOrderFillTransaction', None)
shortOrderCancelTransaction = response.get('shortOrderCancelTransaction', None)
relatedTransactionIDs = response.get('relatedTransactionIDs', None)
lastTransactionID = response.get('lastTransactionID', None)
        # Not every transaction is present on every close (e.g. closing a long-only
        # position returns no short* transactions), so guard against None values.
        for transaction in (longOrderCreateTransaction, longOrderFillTransaction,
                            longOrderCancelTransaction, shortOrderCreateTransaction,
                            shortOrderFillTransaction, shortOrderCancelTransaction,
                            relatedTransactionIDs):
            if transaction is not None:
                print(transaction.__dict__)
        print(lastTransactionID)
return True
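# Example usage (illustrative sketch; assumes a concrete broker class that mixes in
# PositionMixin and exposes self.api / self.account_id via OANDABase):
#
#   broker.pull_position('EURUSD')    # refresh the cached position for one instrument
#   broker.list_open_positions()      # cache every currently open position
#   broker.close_position('EURUSD')   # close both the long and the short side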
| 38.247423
| 87
| 0.683827
|
cf7c89d4bc4a19c01768bccdde351e1c9b7ad481
| 40
|
py
|
Python
|
tests/databases/__init__.py
|
sethfischer/mundo-flags
|
20e5ad68d760b6c736701f6e43551c738456098d
|
[
"MIT"
] | null | null | null |
tests/databases/__init__.py
|
sethfischer/mundo-flags
|
20e5ad68d760b6c736701f6e43551c738456098d
|
[
"MIT"
] | 1
|
2021-09-06T01:48:18.000Z
|
2021-09-06T08:47:36.000Z
|
tests/databases/__init__.py
|
sethfischer/mundo-flags
|
20e5ad68d760b6c736701f6e43551c738456098d
|
[
"MIT"
] | null | null | null |
"""Tests for manage_flags databases."""
| 20
| 39
| 0.725
|
7768e2016aaf998684615ecce25c01243c2885de
| 438
|
py
|
Python
|
server/api0/permissions.py
|
hiway/zentropi20
|
16a5afa9aac98f1e0c5856b2a4608fc4cede25c9
|
[
"BSD-2-Clause"
] | null | null | null |
server/api0/permissions.py
|
hiway/zentropi20
|
16a5afa9aac98f1e0c5856b2a4608fc4cede25c9
|
[
"BSD-2-Clause"
] | null | null | null |
server/api0/permissions.py
|
hiway/zentropi20
|
16a5afa9aac98f1e0c5856b2a4608fc4cede25c9
|
[
"BSD-2-Clause"
] | null | null | null |
from rest_framework.permissions import BasePermission
from .models import Frame
class IsOwner(BasePermission):
"""Custom permission class to allow only frame owners to edit them."""
def has_object_permission(self, request, view, obj):
"""Return True if permission is granted to the frame owner."""
if isinstance(obj, Frame):
return obj.owner == request.user
return obj.owner == request.user
| 33.692308
| 74
| 0.700913
|
c4ed8c27196855b6d46f4b530abf8e7e4b8717cc
| 49
|
py
|
Python
|
src/carbonium/elements/__init__.py
|
cowboycodr/carbonium
|
ad0480e02631337e308a46f856bc9b58fac35c58
|
[
"Apache-2.0"
] | 1
|
2021-09-23T19:22:40.000Z
|
2021-09-23T19:22:40.000Z
|
src/carbonium/elements/__init__.py
|
cowboycodr/carbonium
|
ad0480e02631337e308a46f856bc9b58fac35c58
|
[
"Apache-2.0"
] | null | null | null |
src/carbonium/elements/__init__.py
|
cowboycodr/carbonium
|
ad0480e02631337e308a46f856bc9b58fac35c58
|
[
"Apache-2.0"
] | null | null | null |
from .node import Node
from .element import Element
| 24.5
| 27
| 0.857143
|
b89815d1ed08fef2599c808027337fa6a902becc
| 1,297
|
py
|
Python
|
scripts/xml_reader.py
|
slabasan/hatchet
|
b8f2d8f0c51b95d0632fb0db95ac5caf2ea7f517
|
[
"MIT"
] | null | null | null |
scripts/xml_reader.py
|
slabasan/hatchet
|
b8f2d8f0c51b95d0632fb0db95ac5caf2ea7f517
|
[
"MIT"
] | null | null | null |
scripts/xml_reader.py
|
slabasan/hatchet
|
b8f2d8f0c51b95d0632fb0db95ac5caf2ea7f517
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
from __future__ import print_function
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
tree = ET.parse("experiment.xml")
root = tree.getroot()
# Root
print(root.tag, root.attrib)
# Root[0] - Header
print("\t", root[0].tag, root[0].attrib)
for child in root[0]:
print("\t\t", child.tag, child.attrib)
# Root[0][0] - Info (empty)
# for elem in root[0][0].iter():
# print elem.tag, elem.attrib
# Root[1] - SecCallPathProfile
print("\t", root[1].tag, root[1].attrib)
for child in root[1]:
print("\t\t", child.tag, child.attrib)
print()
# Root[1][0] - SecHeader
# Children - MetricTable, MetricDBTable, TraceDBTable, LoadModuleTable,
# FileTable, ProcedureTable
for loadm in root[1][0][3].iter("LoadModule"):
print(loadm.attrib)
print()
for filename in root[1][0][4].iter("File"):
print(filename.attrib)
print()
for procedure in root[1][0][5].iter("Procedure"):
print(procedure.attrib)
print()
# Root[1][1] - SecCallPathProfileData
for elem in root[1][1].iter():
print(elem.tag, elem.attrib)
| 24.018519
| 73
| 0.688512
|
56e5c536074d74d31f4d24ac8e326a346c1ae65e
| 2,563
|
py
|
Python
|
test/models/test_deepset.py
|
NetKet/netket
|
96758e814fc3128e6821564d6cc2852bac40ecf2
|
[
"Apache-2.0"
] | null | null | null |
test/models/test_deepset.py
|
NetKet/netket
|
96758e814fc3128e6821564d6cc2852bac40ecf2
|
[
"Apache-2.0"
] | null | null | null |
test/models/test_deepset.py
|
NetKet/netket
|
96758e814fc3128e6821564d6cc2852bac40ecf2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The NetKet Authors - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import jax
import jax.numpy as jnp
import netket as nk
@pytest.mark.parametrize(
"cusp_exponent", [pytest.param(None, id="cusp=None"), pytest.param(5, id="cusp=5")]
)
@pytest.mark.parametrize(
"L",
[
pytest.param(1.0, id="1D"),
pytest.param((1.0, 1.0), id="2D-Square"),
pytest.param((1.0, 0.5), id="2D-Rectangle"),
],
)
def test_deepsets(cusp_exponent, L):
hilb = nk.hilbert.Particle(N=2, L=L, pbc=True)
sdim = len(hilb.extent)
x = jnp.hstack([jnp.ones(4), -jnp.ones(4)]).reshape(1, -1)
xp = jnp.roll(x, sdim)
ds = nk.models.DeepSetRelDistance(
hilbert=hilb,
cusp_exponent=cusp_exponent,
layers_phi=2,
layers_rho=2,
features_phi=(10, 10),
features_rho=(10, 1),
)
p = ds.init(jax.random.PRNGKey(42), x)
assert jnp.allclose(ds.apply(p, x), ds.apply(p, xp))
def test_deepsets_error():
hilb = nk.hilbert.Particle(N=2, L=1.0, pbc=True)
sdim = len(hilb.extent)
x = jnp.hstack([jnp.ones(4), -jnp.ones(4)]).reshape(1, -1)
xp = jnp.roll(x, sdim)
ds = nk.models.DeepSetRelDistance(
hilbert=hilb,
layers_phi=3,
layers_rho=3,
features_phi=(10, 10),
features_rho=(10, 1),
)
with pytest.raises(ValueError):
p = ds.init(jax.random.PRNGKey(42), x)
with pytest.raises(AssertionError):
ds = nk.models.DeepSetRelDistance(
hilbert=hilb,
layers_phi=2,
layers_rho=2,
features_phi=(10, 10),
features_rho=(10, 2),
)
p = ds.init(jax.random.PRNGKey(42), x)
with pytest.raises(ValueError):
ds = nk.models.DeepSetRelDistance(
hilbert=nk.hilbert.Particle(N=2, L=1.0, pbc=False),
layers_phi=2,
layers_rho=2,
features_phi=(10, 10),
features_rho=(10, 2),
)
p = ds.init(jax.random.PRNGKey(42), x)
| 29.125
| 87
| 0.616075
|
0a6960ef625a34357baabcef7c8cfc323041eb62
| 1,411
|
py
|
Python
|
ariadne_server/context/database.py
|
seanaye/FeatherLight-API
|
4d42a424762311ee35b3fd4f689883aa4197eb2e
|
[
"MIT"
] | null | null | null |
ariadne_server/context/database.py
|
seanaye/FeatherLight-API
|
4d42a424762311ee35b3fd4f689883aa4197eb2e
|
[
"MIT"
] | null | null | null |
ariadne_server/context/database.py
|
seanaye/FeatherLight-API
|
4d42a424762311ee35b3fd4f689883aa4197eb2e
|
[
"MIT"
] | null | null | null |
import os
import asyncio
from socket import gaierror
from gino import Gino
from helpers.mixins import LoggerMixin
class GinoInstance(LoggerMixin):
"""Gino connection manager"""
def __init__(self):
self._host = os.environ.get('POSTGRES_HOST')
self._user = os.environ.get('POSTGRES_USER')
self._password = os.environ.get('POSTGRES_PASSWORD')
self._db_name = os.environ.get('POSTGRES_DB')
self.db = Gino()
async def initialize(self) -> None:
"""init db connection"""
bind_str = f"postgresql://{self._user}:{self._password}@{self._host}/{self._db_name}"
i = 1
done = False
while not done:
self.logger.info(f"attempt {i}")
try:
self.logger.info(f'connecting to: {bind_str}, attempt {i}')
await self.db.set_bind(bind_str)
self.logger.info('bound to db')
await self.db.gino.create_all()
self.logger.info('created tables')
done = True
except gaierror as e:
self.logger.warning(e)
self.logger.warning(f'DB connect attempt {i} failed')
await asyncio.sleep(5)
i += 1
self.logger.info('finished')
return
async def destroy(self) -> None:
""" close connection"""
await self.db.pop_bind().close()
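# Example usage (illustrative sketch; assumes an asyncio entry point and that the
# POSTGRES_* environment variables are set):
#
#   gino = GinoInstance()
#   await gino.initialize()   # bind the engine and create tables (retries until reachable)
#   ...                       # run queries through gino.db
#   await gino.destroy()      # release the connection on shutdown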
| 32.813953
| 93
| 0.572644
|
c35092dcfd77b9e8d51987c8c9622a956257f6d0
| 1,568
|
py
|
Python
|
tests/test_pipeline.py
|
FcoJavT/rantanplan
|
21cad3cb106659d97035bd9ac7f5dbe025f2ff8e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pipeline.py
|
FcoJavT/rantanplan
|
21cad3cb106659d97035bd9ac7f5dbe025f2ff8e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pipeline.py
|
FcoJavT/rantanplan
|
21cad3cb106659d97035bd9ac7f5dbe025f2ff8e
|
[
"Apache-2.0"
] | null | null | null |
import spacy
from rantanplan.pipeline import load_pipeline
test_dict_list = [
{'text': 'prue', 'pos_': '', 'tag_': '',
'n_rights': 0},
{'text': '-', 'pos_': '', 'tag_': '',
'n_rights': 0},
{'text': '\n', 'pos_': '', 'tag_': '',
'n_rights': 0},
{'text': 'ba', 'pos_': '', 'tag_': '',
'n_rights': 0}]
def test_load_pipeline(monkeypatch):
def mockreturn(lang=None):
nlp = spacy.blank('es') # noqa
nlp.vocab.lookups.get_table = lambda *_: {}
return nlp
monkeypatch.setattr(spacy, 'load', mockreturn)
# lang doesn't matter as long as it hasn't been used in the test session
nlp = load_pipeline("blank")
doc = nlp("prue-\nba")
token_dict = []
for token in doc:
token_dict.append(
{"text": token.text, "pos_": token.pos_, "tag_": token.tag_,
"n_rights": token.n_rights}) # noqa
assert token_dict == test_dict_list
def test_load_pipeline_affixes(monkeypatch):
def mockreturn(lang=None):
nlp = spacy.blank('es') # noqa
nlp.vocab.lookups.get_table = lambda *_: {}
return nlp
monkeypatch.setattr(spacy, 'load', mockreturn)
# lang doesn't matter as long as it hasn't been used in the test session
nlp = load_pipeline("blank", split_affixes=False)
doc = nlp("prue-\nba")
token_dict = []
for token in doc:
token_dict.append(
{"text": token.text, "pos_": token.pos_, "tag_": token.tag_,
"n_rights": token.n_rights}) # noqa
assert token_dict == test_dict_list
| 31.36
| 76
| 0.589923
|
a9209f1cbdd7be87f57fe1746e4c3ea098e04485
| 15,117
|
py
|
Python
|
pytorch_lightning/loggers/neptune.py
|
KyleGoyette/pytorch-lightning
|
d6470bf1937e51e037a7f94a55ad76898e5ae103
|
[
"Apache-2.0"
] | 3
|
2021-04-09T14:03:03.000Z
|
2021-04-10T02:58:23.000Z
|
pytorch_lightning/loggers/neptune.py
|
KyleGoyette/pytorch-lightning
|
d6470bf1937e51e037a7f94a55ad76898e5ae103
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/loggers/neptune.py
|
KyleGoyette/pytorch-lightning
|
d6470bf1937e51e037a7f94a55ad76898e5ae103
|
[
"Apache-2.0"
] | 1
|
2021-09-16T15:14:11.000Z
|
2021-09-16T15:14:11.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Neptune Logger
--------------
"""
import logging
from argparse import Namespace
from typing import Any, Dict, Iterable, Optional, Union
import torch
from torch import is_tensor
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
from pytorch_lightning.utilities import _module_available, rank_zero_only
log = logging.getLogger(__name__)
_NEPTUNE_AVAILABLE = _module_available("neptune")
if _NEPTUNE_AVAILABLE:
import neptune
from neptune.experiments import Experiment
else:
# needed for test mocks, these tests shall be updated
neptune, Experiment = None, None
class NeptuneLogger(LightningLoggerBase):
r"""
Log using `Neptune <https://neptune.ai>`_.
Install it with pip:
.. code-block:: bash
pip install neptune-client
The Neptune logger can be used in the online mode or offline (silent) mode.
To log experiment data in online mode, :class:`NeptuneLogger` requires an API key.
In offline mode, the logger does not connect to Neptune.
**ONLINE MODE**
.. testcode::
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import NeptuneLogger
# arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class
# We are using an api_key for the anonymous user "neptuner" but you can use your own.
neptune_logger = NeptuneLogger(
api_key='ANONYMOUS',
project_name='shared/pytorch-lightning-integration',
experiment_name='default', # Optional,
params={'max_epochs': 10}, # Optional,
tags=['pytorch-lightning', 'mlp'] # Optional,
)
trainer = Trainer(max_epochs=10, logger=neptune_logger)
**OFFLINE MODE**
.. testcode::
from pytorch_lightning.loggers import NeptuneLogger
# arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class
neptune_logger = NeptuneLogger(
offline_mode=True,
project_name='USER_NAME/PROJECT_NAME',
experiment_name='default', # Optional,
params={'max_epochs': 10}, # Optional,
tags=['pytorch-lightning', 'mlp'] # Optional,
)
trainer = Trainer(max_epochs=10, logger=neptune_logger)
    Use the logger anywhere in your :class:`~pytorch_lightning.core.lightning.LightningModule` as follows:
.. code-block:: python
class LitModel(LightningModule):
def training_step(self, batch, batch_idx):
# log metrics
self.logger.experiment.log_metric('acc_train', ...)
# log images
self.logger.experiment.log_image('worse_predictions', ...)
# log model checkpoint
self.logger.experiment.log_artifact('model_checkpoint.pt', ...)
self.logger.experiment.whatever_neptune_supports(...)
def any_lightning_module_function_or_hook(self):
self.logger.experiment.log_metric('acc_train', ...)
self.logger.experiment.log_image('worse_predictions', ...)
self.logger.experiment.log_artifact('model_checkpoint.pt', ...)
self.logger.experiment.whatever_neptune_supports(...)
If you want to log objects after the training is finished use ``close_after_fit=False``:
.. code-block:: python
neptune_logger = NeptuneLogger(
...
close_after_fit=False,
...
)
trainer = Trainer(logger=neptune_logger)
trainer.fit()
# Log test metrics
trainer.test(model)
# Log additional metrics
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_true, y_pred)
neptune_logger.experiment.log_metric('test_accuracy', accuracy)
# Log charts
from scikitplot.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(16, 12))
plot_confusion_matrix(y_true, y_pred, ax=ax)
neptune_logger.experiment.log_image('confusion_matrix', fig)
# Save checkpoints folder
neptune_logger.experiment.log_artifact('my/checkpoints')
# When you are done, stop the experiment
neptune_logger.experiment.stop()
See Also:
- An `Example experiment <https://ui.neptune.ai/o/shared/org/
pytorch-lightning-integration/e/PYTOR-66/charts>`_ showing the UI of Neptune.
- `Tutorial <https://docs.neptune.ai/integrations/pytorch_lightning.html>`_ on how to use
Pytorch Lightning with Neptune.
Args:
api_key: Required in online mode.
Neptune API token, found on https://neptune.ai.
Read how to get your
`API key <https://docs.neptune.ai/python-api/tutorials/get-started.html#copy-api-token>`_.
It is recommended to keep it in the `NEPTUNE_API_TOKEN`
environment variable and then you can leave ``api_key=None``.
project_name: Required in online mode. Qualified name of a project in a form of
"namespace/project_name" for example "tom/minst-classification".
If ``None``, the value of `NEPTUNE_PROJECT` environment variable will be taken.
You need to create the project in https://neptune.ai first.
offline_mode: Optional default ``False``. If ``True`` no logs will be sent
to Neptune. Usually used for debug purposes.
close_after_fit: Optional default ``True``. If ``False`` the experiment
will not be closed after training and additional metrics,
images or artifacts can be logged. Also, remember to close the experiment explicitly
by running ``neptune_logger.experiment.stop()``.
experiment_name: Optional. Editable name of the experiment.
Name is displayed in the experiment’s Details (Metadata section) and
in experiments view as a column.
experiment_id: Optional. Default is ``None``. The ID of the existing experiment.
If specified, connect to experiment with experiment_id in project_name.
Input arguments "experiment_name", "params", "properties" and "tags" will be overriden based
on fetched experiment data.
prefix: A string to put at the beginning of metric keys.
\**kwargs: Additional arguments like `params`, `tags`, `properties`, etc. used by
:func:`neptune.Session.create_experiment` can be passed as keyword arguments in this logger.
Raises:
ImportError:
If required Neptune package is not installed on the device.
"""
LOGGER_JOIN_CHAR = '-'
def __init__(
self,
api_key: Optional[str] = None,
project_name: Optional[str] = None,
close_after_fit: Optional[bool] = True,
offline_mode: bool = False,
experiment_name: Optional[str] = None,
experiment_id: Optional[str] = None,
prefix: str = '',
**kwargs
):
if neptune is None:
raise ImportError(
'You want to use `neptune` logger which is not installed yet,'
' install it with `pip install neptune-client`.'
)
super().__init__()
self.api_key = api_key
self.project_name = project_name
self.offline_mode = offline_mode
self.close_after_fit = close_after_fit
self.experiment_name = experiment_name
self._prefix = prefix
self._kwargs = kwargs
self.experiment_id = experiment_id
self._experiment = None
log.info(f'NeptuneLogger will work in {"offline" if self.offline_mode else "online"} mode')
def __getstate__(self):
state = self.__dict__.copy()
# Experiment cannot be pickled, and additionally its ID cannot be pickled in offline mode
state['_experiment'] = None
if self.offline_mode:
state['experiment_id'] = None
return state
@property
@rank_zero_experiment
def experiment(self) -> Experiment:
r"""
Actual Neptune object. To use neptune features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
self.logger.experiment.some_neptune_function()
"""
# Note that even though we initialize self._experiment in __init__,
# it may still end up being None after being pickled and un-pickled
if self._experiment is None:
self._experiment = self._create_or_get_experiment()
return self._experiment
@rank_zero_only
def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
params = self._convert_params(params)
params = self._flatten_dict(params)
for key, val in params.items():
self.experiment.set_property(f'param__{key}', val)
@rank_zero_only
def log_metrics(self, metrics: Dict[str, Union[torch.Tensor, float]], step: Optional[int] = None) -> None:
"""
Log metrics (numeric values) in Neptune experiments.
Args:
metrics: Dictionary with metric names as keys and measured quantities as values
step: Step number at which the metrics should be recorded, currently ignored
"""
assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'
metrics = self._add_prefix(metrics)
for key, val in metrics.items():
# `step` is ignored because Neptune expects strictly increasing step values which
# Lighting does not always guarantee.
self.log_metric(key, val)
@rank_zero_only
def finalize(self, status: str) -> None:
super().finalize(status)
if self.close_after_fit:
self.experiment.stop()
@property
def save_dir(self) -> Optional[str]:
# Neptune does not save any local files
return None
@property
def name(self) -> str:
if self.offline_mode:
return 'offline-name'
else:
return self.experiment.name
@property
def version(self) -> str:
if self.offline_mode:
return 'offline-id-1234'
else:
return self.experiment.id
@rank_zero_only
def log_metric(
self, metric_name: str, metric_value: Union[torch.Tensor, float, str], step: Optional[int] = None
) -> None:
"""
Log metrics (numeric values) in Neptune experiments.
Args:
            metric_name: The name of the log, e.g. mse, loss, accuracy.
metric_value: The value of the log (data-point).
step: Step number at which the metrics should be recorded, must be strictly increasing
"""
if is_tensor(metric_value):
metric_value = metric_value.cpu().detach()
if step is None:
self.experiment.log_metric(metric_name, metric_value)
else:
self.experiment.log_metric(metric_name, x=step, y=metric_value)
@rank_zero_only
def log_text(self, log_name: str, text: str, step: Optional[int] = None) -> None:
"""
Log text data in Neptune experiments.
Args:
            log_name: The name of the log, e.g. mse, my_text_data, timing_info.
text: The value of the log (data-point).
step: Step number at which the metrics should be recorded, must be strictly increasing
"""
self.experiment.log_text(log_name, text, step=step)
@rank_zero_only
def log_image(self, log_name: str, image: Union[str, Any], step: Optional[int] = None) -> None:
"""
Log image data in Neptune experiment
Args:
            log_name: The name of the log, e.g. bboxes, visualisations, sample_images.
image: The value of the log (data-point).
Can be one of the following types: PIL image, `matplotlib.figure.Figure`,
path to image file (str)
step: Step number at which the metrics should be recorded, must be strictly increasing
"""
if step is None:
self.experiment.log_image(log_name, image)
else:
self.experiment.log_image(log_name, x=step, y=image)
@rank_zero_only
def log_artifact(self, artifact: str, destination: Optional[str] = None) -> None:
"""Save an artifact (file) in Neptune experiment storage.
Args:
artifact: A path to the file in local filesystem.
destination: Optional. Default is ``None``. A destination path.
If ``None`` is passed, an artifact file name will be used.
"""
self.experiment.log_artifact(artifact, destination)
@rank_zero_only
def set_property(self, key: str, value: Any) -> None:
"""
Set key-value pair as Neptune experiment property.
Args:
key: Property key.
value: New value of a property.
"""
self.experiment.set_property(key, value)
@rank_zero_only
def append_tags(self, tags: Union[str, Iterable[str]]) -> None:
"""
Appends tags to the neptune experiment.
Args:
tags: Tags to add to the current experiment. If str is passed, a single tag is added.
If multiple - comma separated - str are passed, all of them are added as tags.
If list of str is passed, all elements of the list are added as tags.
"""
if str(tags) == tags:
            tags = [tags]  # make it an iterable if it is not yet
self.experiment.append_tags(*tags)
def _create_or_get_experiment(self):
if self.offline_mode:
project = neptune.Session(backend=neptune.OfflineBackend()).get_project('dry-run/project')
else:
session = neptune.Session.with_default_backend(api_token=self.api_key)
project = session.get_project(self.project_name)
if self.experiment_id is None:
exp = project.create_experiment(name=self.experiment_name, **self._kwargs)
self.experiment_id = exp.id
else:
exp = project.get_experiments(id=self.experiment_id)[0]
self.experiment_name = exp.get_system_properties()['name']
self.params = exp.get_parameters()
self.properties = exp.get_properties()
self.tags = exp.get_tags()
return exp
| 38.36802
| 110
| 0.641529
|
7d6855a6cb17ae987e725a4e7adf90920313e807
| 4,545
|
py
|
Python
|
sdk/python/pulumi_kubernetes/coordination/v1/Lease.py
|
RichardWLaub/pulumi-kubernetes
|
19c8f1bd26e54bf5b646423a625151c95ea40594
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/coordination/v1/Lease.py
|
RichardWLaub/pulumi-kubernetes
|
19c8f1bd26e54bf5b646423a625151c95ea40594
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/coordination/v1/Lease.py
|
RichardWLaub/pulumi-kubernetes
|
19c8f1bd26e54bf5b646423a625151c95ea40594
|
[
"Apache-2.0"
] | null | null | null |
# *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class Lease(pulumi.CustomResource):
"""
Lease defines a lease concept.
"""
apiVersion: pulumi.Output[str]
"""
APIVersion defines the versioned schema of this representation of an object. Servers should
convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
"""
kind: pulumi.Output[str]
"""
Kind is a string value representing the REST resource this object represents. Servers may infer
this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
"""
metadata: pulumi.Output[dict]
"""
More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
spec: pulumi.Output[dict]
"""
Specification of the Lease. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
def __init__(self, resource_name, opts=None, metadata=None, spec=None, __name__=None, __opts__=None):
"""
Create a Lease resource with the given unique name, arguments, and options.
:param str resource_name: The _unique_ name of the resource.
:param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
:param pulumi.Input[dict] metadata: More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[dict] spec: Specification of the Lease. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'coordination.k8s.io/v1'
__props__['kind'] = 'Lease'
__props__['metadata'] = metadata
__props__['spec'] = spec
__props__['status'] = None
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(),
))
super(Lease, self).__init__(
"kubernetes:coordination.k8s.io/v1:Lease",
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None):
"""
Get the state of an existing `Lease` resource, as identified by `id`.
The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
then (per Kubernetes convention) the ID becomes `default/[name]`.
Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.
:param str resource_name: _Unique_ name used to register this resource with Pulumi.
:param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
Takes the form `[namespace]/[name]` or `[name]`.
:param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
resource's behavior.
"""
opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
return Lease(resource_name, opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
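A minimal usage sketch for the generated resource above; the resource name, namespace and spec values are illustrative assumptions, not anything taken from this repository.
from pulumi_kubernetes.coordination.v1 import Lease
# Create a new Lease (field names follow the Kubernetes LeaseSpec schema).
lease = Lease(
    "my-lease",
    metadata={"name": "my-lease", "namespace": "default"},
    spec={"holderIdentity": "worker-0", "leaseDurationSeconds": 15})
# Adopt an existing Lease; per the docstring the ID takes the form "[namespace]/[name]".
existing = Lease.get("existing-lease", "default/existing-lease")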
| 40.945946
| 114
| 0.676128
|
7077dcfb7e15dc57093fca385d40078d19155654
| 1,099
|
py
|
Python
|
zingmp3/downloader.py
|
mazino2d/data-crawler
|
8acd60694bb3cf9514184bdb21d2f0f9e7051cf7
|
[
"MIT"
] | null | null | null |
zingmp3/downloader.py
|
mazino2d/data-crawler
|
8acd60694bb3cf9514184bdb21d2f0f9e7051cf7
|
[
"MIT"
] | null | null | null |
zingmp3/downloader.py
|
mazino2d/data-crawler
|
8acd60694bb3cf9514184bdb21d2f0f9e7051cf7
|
[
"MIT"
] | null | null | null |
import requests
import wget
import pandas as pd
import argparse
from os import listdir, mkdir, system
# Add the arguments to the parser
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--count", required=True)
args = vars(ap.parse_args())
COUNT = int(args['count'])
# read file to get song id
song_info = pd.read_csv('./trend_song.csv')
song_id = song_info['id']
exist_ids = []
try:
exist_ids = [int(f[:-4]) for f in listdir("./data")]
except FileNotFoundError:
print("[WARNING] No 'data' directory")
mkdir('./data')
song_id = [x for x in song_id if x not in exist_ids]
# API endpoint (this is a private link)
URL = "https://zmediadata.zingmp3.vn/api/song/mGetInfoMedia?" + \
"infoSrc=webZMD&typeLink=audio_video&listKey=%s"
count = 1
for id in song_id:
# sending get request and saving the response as response object
r = requests.get(url=URL % (id))
# extracting data in json format
link = r.json()[0]['link']
# download mp3 file
wget.download(link, 'data/%s.mp3' % (id))
if count == COUNT:
break
else:
count = count + 1
| 24.422222
| 68
| 0.66697
|
ecb44b8958d3fcc7bfdd0cecb642b7329df6a8d7
| 11,355
|
py
|
Python
|
indico/modules/events/contributions/controllers/display.py
|
tobiashuste/indico
|
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
|
[
"MIT"
] | null | null | null |
indico/modules/events/contributions/controllers/display.py
|
tobiashuste/indico
|
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
|
[
"MIT"
] | null | null | null |
indico/modules/events/contributions/controllers/display.py
|
tobiashuste/indico
|
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import jsonify, request, session
from sqlalchemy.orm import joinedload, load_only
from werkzeug.exceptions import Forbidden, NotFound
from indico.core.config import config
from indico.core.db import db
from indico.legacy.pdfinterface.latex import ContribsToPDF, ContribToPDF
from indico.modules.events.abstracts.util import filter_field_values
from indico.modules.events.contributions import contribution_settings
from indico.modules.events.contributions.lists import ContributionDisplayListGenerator
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.persons import AuthorType, ContributionPersonLink
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.contributions.util import (get_contribution_ical_file,
get_contributions_with_user_as_submitter,
has_contributions_with_user_as_submitter)
from indico.modules.events.contributions.views import WPAuthorList, WPContributions, WPMyContributions, WPSpeakerList
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.layout.util import is_menu_entry_enabled
from indico.modules.events.models.persons import EventPerson
from indico.modules.events.util import get_base_ical_parameters
from indico.util.i18n import _
from indico.web.flask.util import jsonify_data, send_file
from indico.web.rh import RH
from indico.web.util import jsonify_template
def _get_persons(event, condition):
"""Queries event persons linked to contributions in the event, filtered using the condition provided."""
return (event.persons.filter(EventPerson.contribution_links.any(
db.and_(condition,
ContributionPersonLink.contribution.has(~Contribution.is_deleted))))
.options(joinedload('contribution_links').joinedload('contribution'))
.order_by(db.func.lower(EventPerson.last_name)))
def _author_page_active(event):
return is_menu_entry_enabled('author_index', event) or is_menu_entry_enabled('contributions', event)
class RHContributionDisplayBase(RHDisplayEventBase):
normalize_url_spec = {
'locators': {
lambda self: self.contrib
}
}
def _check_access(self):
RHDisplayEventBase._check_access(self)
if not self.contrib.can_access(session.user):
raise Forbidden
published = contribution_settings.get(self.event, 'published')
if (not published and not self.event.can_manage(session.user)
and not self.contrib.is_user_associated(session.user)):
raise NotFound(_("The contributions of this event have not been published yet."))
def _process_args(self):
RHDisplayEventBase._process_args(self)
self.contrib = Contribution.get_one(request.view_args['contrib_id'], is_deleted=False)
class RHDisplayProtectionBase(RHDisplayEventBase):
def _check_access(self):
RHDisplayEventBase._check_access(self)
published = contribution_settings.get(self.event, 'published')
if not published and not has_contributions_with_user_as_submitter(self.event, session.user):
raise NotFound(_("The contributions of this event have not been published yet."))
if not is_menu_entry_enabled(self.MENU_ENTRY_NAME, self.event):
self._forbidden_if_not_admin()
class RHMyContributions(RHDisplayProtectionBase):
"""Display list of current user contributions"""
MENU_ENTRY_NAME = 'my_contributions'
def _check_access(self):
RHDisplayProtectionBase._check_access(self)
if not session.user:
raise Forbidden
def _process(self):
contributions = get_contributions_with_user_as_submitter(self.event, session.user)
return WPMyContributions.render_template('display/user_contribution_list.html', self.event,
contributions=contributions)
class RHContributionList(RHDisplayProtectionBase):
"""Display list of event contributions"""
MENU_ENTRY_NAME = 'contributions'
view_class = WPContributions
def _process_args(self):
RHDisplayEventBase._process_args(self)
self.list_generator = ContributionDisplayListGenerator(event=self.event)
def _process(self):
return self.view_class.render_template('display/contribution_list.html', self.event,
timezone=self.event.display_tzinfo,
**self.list_generator.get_list_kwargs())
class RHContributionDisplay(RHContributionDisplayBase):
"""Display page with contribution details """
view_class = WPContributions
def _process(self):
ical_params = get_base_ical_parameters(session.user, 'contributions',
'/export/event/{0}.ics'.format(self.event.id))
contrib = (Contribution.query
.filter_by(id=self.contrib.id)
.options(joinedload('type'),
joinedload('session'),
joinedload('subcontributions'),
joinedload('timetable_entry').lazyload('*'))
.one())
can_manage = self.event.can_manage(session.user)
owns_abstract = contrib.abstract.user_owns(session.user) if contrib.abstract else None
field_values = filter_field_values(contrib.field_values, can_manage, owns_abstract)
return self.view_class.render_template('display/contribution_display.html', self.event,
contribution=contrib,
show_author_link=_author_page_active(self.event),
field_values=field_values,
page_title=contrib.title,
published=contribution_settings.get(self.event, 'published'),
**ical_params)
class RHAuthorList(RHDisplayProtectionBase):
MENU_ENTRY_NAME = 'author_index'
view_class = WPAuthorList
def _process(self):
authors = _get_persons(self.event, ContributionPersonLink.author_type != AuthorType.none)
return self.view_class.render_template('display/author_list.html', self.event, authors=authors)
class RHSpeakerList(RHDisplayProtectionBase):
MENU_ENTRY_NAME = 'speaker_index'
view_class = WPSpeakerList
def _process(self):
speakers = _get_persons(self.event, ContributionPersonLink.is_speaker)
return self.view_class.render_template('display/speaker_list.html', self.event, speakers=speakers)
class RHContributionAuthor(RHContributionDisplayBase):
"""Display info about an author"""
normalize_url_spec = {
'locators': {
lambda self: self.author
}
}
def _check_access(self):
RHContributionDisplayBase._check_access(self)
if not _author_page_active(self.event):
self._forbidden_if_not_admin()
def _process_args(self):
RHContributionDisplayBase._process_args(self)
self.author = (ContributionPersonLink.find_one(ContributionPersonLink.author_type != AuthorType.none,
id=request.view_args['person_id'],
contribution=self.contrib))
def _process(self):
author_contribs = (Contribution.query.with_parent(self.event)
.join(ContributionPersonLink)
.options(joinedload('event'))
.options(load_only('id', 'title'))
.filter(ContributionPersonLink.id == self.author.id,
ContributionPersonLink.author_type != AuthorType.none)
.all())
return WPContributions.render_template('display/contribution_author.html', self.event,
author=self.author, contribs=author_contribs)
class RHContributionExportToPDF(RHContributionDisplayBase):
def _process(self):
if not config.LATEX_ENABLED:
raise NotFound
pdf = ContribToPDF(self.contrib)
return send_file('contribution.pdf', pdf.generate(), 'application/pdf')
class RHContributionsExportToPDF(RHContributionList):
def _process(self):
if not config.LATEX_ENABLED:
raise NotFound
contribs = self.list_generator.get_list_kwargs()['contribs']
pdf = ContribsToPDF(self.event, contribs)
return send_file('contributions.pdf', pdf.generate(), 'application/pdf')
class RHContributionExportToICAL(RHContributionDisplayBase):
"""Export contribution to ICS"""
def _process(self):
if not self.contrib.is_scheduled:
raise NotFound('This contribution is not scheduled')
return send_file('contribution.ics', get_contribution_ical_file(self.contrib), 'text/calendar')
class RHContributionListFilter(RHContributionList):
"""Display dialog with filters"""
def _process(self):
return RH._process(self)
def _process_GET(self):
return jsonify_template('events/contributions/contrib_list_filter.html',
filters=self.list_generator.list_config['filters'],
static_items=self.list_generator.static_items)
def _process_POST(self):
self.list_generator.store_configuration()
return jsonify_data(**self.list_generator.render_contribution_list())
class RHContributionListDisplayStaticURL(RHContributionList):
"""Generate static URL for the current set of filters"""
def _process(self):
return jsonify(url=self.list_generator.generate_static_url())
class RHSubcontributionDisplay(RHDisplayEventBase):
normalize_url_spec = {
'locators': {
lambda self: self.subcontrib
}
}
view_class = WPContributions
def _check_access(self):
RHDisplayEventBase._check_access(self)
if not self.subcontrib.can_access(session.user):
raise Forbidden
published = contribution_settings.get(self.event, 'published')
if (not published and not self.event.can_manage(session.user)
                and not self.subcontrib.contribution.is_user_associated(session.user)):
raise NotFound(_("The contributions of this event have not been published yet."))
def _process_args(self):
RHDisplayEventBase._process_args(self)
self.subcontrib = SubContribution.get_one(request.view_args['subcontrib_id'], is_deleted=False)
def _process(self):
return self.view_class.render_template('display/subcontribution_display.html', self.event,
subcontrib=self.subcontrib)
| 43.011364
| 117
| 0.67609
|
0162bf4530e15cfd061a6c53d4bc1202fcd71625
| 943
|
py
|
Python
|
frappe/core/page/applications/applications.py
|
cadencewatches/frappe
|
d9dcf132a10d68b2dcc80ef348e6d967f1e44084
|
[
"MIT"
] | null | null | null |
frappe/core/page/applications/applications.py
|
cadencewatches/frappe
|
d9dcf132a10d68b2dcc80ef348e6d967f1e44084
|
[
"MIT"
] | null | null | null |
frappe/core/page/applications/applications.py
|
cadencewatches/frappe
|
d9dcf132a10d68b2dcc80ef348e6d967f1e44084
|
[
"MIT"
] | 1
|
2018-03-21T15:51:46.000Z
|
2018-03-21T15:51:46.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.installer
from frappe import _
@frappe.whitelist()
def get_app_list():
out = {}
installed = frappe.get_installed_apps()
for app in frappe.get_all_apps(True):
app_hooks = frappe.get_hooks(app_name=app)
if app_hooks.get('hide_in_installer'):
continue
out[app] = {}
for key in ("app_name", "app_title", "app_description", "app_icon",
"app_publisher", "app_version", "app_url", "app_color"):
val = app_hooks.get(key) or []
out[app][key] = val[0] if len(val) else ""
if app in installed:
out[app]["installed"] = 1
return out
@frappe.whitelist()
def install_app(name):
app_hooks = frappe.get_hooks(app_name=name)
if app_hooks.get('hide_in_installer'):
frappe.throw(_("You cannot install this app"))
frappe.installer.install_app(name)
| 25.486486
| 71
| 0.720042
|
d603d5c4455fc455616a84ba502a0767c772ec24
| 260
|
py
|
Python
|
data.py
|
jonidakolgjini/quiz_game
|
eae9a197f6831f0413b3f9e0355ca0ce4b7d95a2
|
[
"MIT"
] | null | null | null |
data.py
|
jonidakolgjini/quiz_game
|
eae9a197f6831f0413b3f9e0355ca0ce4b7d95a2
|
[
"MIT"
] | null | null | null |
data.py
|
jonidakolgjini/quiz_game
|
eae9a197f6831f0413b3f9e0355ca0ce4b7d95a2
|
[
"MIT"
] | null | null | null |
import requests
parameters = {
"amount": 10,
"type": "boolean"
}
response = requests.get(url="https://opentdb.com/api.php", params=parameters)  # amount/type come from params rather than being duplicated in the URL
response.raise_for_status()
data = response.json()
question_data = data["results"]
| 20
| 100
| 0.711538
|
2cc26e8429e93f90242a3ec19c7f238335783e58
| 1,871
|
py
|
Python
|
qiskit/providers/ibmq/api/exceptions.py
|
dowem/qiskit-ibmq-provider
|
dfdc56847ed5a0920e764bc7e7cea39f77a0aab2
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/ibmq/api/exceptions.py
|
dowem/qiskit-ibmq-provider
|
dfdc56847ed5a0920e764bc7e7cea39f77a0aab2
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/ibmq/api/exceptions.py
|
dowem/qiskit-ibmq-provider
|
dfdc56847ed5a0920e764bc7e7cea39f77a0aab2
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Exceptions related to the IBM Quantum Experience API."""
from ..exceptions import IBMQError
class ApiError(IBMQError):
"""Generic IBM Quantum Experience API error."""
pass
class RequestsApiError(ApiError):
"""Exception re-raising a RequestException."""
def __init__(self, message: str, status_code: int = -1):
"""RequestsApiError constructor.
Args:
message: Exception message.
status_code: Response status code. -1 for unknown status code.
"""
super().__init__(message)
self.status_code = status_code
class WebsocketError(ApiError):
"""Exceptions related to websockets."""
pass
class WebsocketIBMQProtocolError(WebsocketError):
"""Exceptions related to IBM Quantum protocol error."""
pass
class WebsocketAuthenticationError(WebsocketError):
"""Exception caused during websocket authentication."""
pass
class WebsocketTimeoutError(WebsocketError):
"""Timeout during websocket communication."""
pass
class AuthenticationLicenseError(ApiError):
"""Exception due to user not having accepted the license agreement."""
pass
class ApiIBMQProtocolError(ApiError):
"""Exception related to IBM Quantum API protocol error."""
pass
class UserTimeoutExceededError(ApiError):
"""Exceptions related to exceeding user defined timeout."""
pass
| 26.728571
| 77
| 0.718867
|
d52797677e4ee82875503b2486fa1a15a50e1e75
| 10,290
|
py
|
Python
|
harvardit/harvardit/settings.py
|
arochaga/harvardit
|
67e2661942fb5be8bd8a68e078fd11c8899c59de
|
[
"Apache-2.0"
] | 4
|
2018-05-22T20:56:00.000Z
|
2018-08-16T07:14:05.000Z
|
harvardit/harvardit/settings.py
|
arochaga/harvardit
|
67e2661942fb5be8bd8a68e078fd11c8899c59de
|
[
"Apache-2.0"
] | null | null | null |
harvardit/harvardit/settings.py
|
arochaga/harvardit
|
67e2661942fb5be8bd8a68e078fd11c8899c59de
|
[
"Apache-2.0"
] | 4
|
2018-05-23T13:54:16.000Z
|
2018-09-07T19:30:09.000Z
|
"""
Django settings for harvardit project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q^%n-tc#sqwk$*bzccl@foufciljlqu5dm*6ht@sh!+uu%tzf!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'harvardit',
'bootstrap4',
'web3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'harvardit.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'harvardit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(asctime)s >>>>> %(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'filename': BASE_DIR + '/log/django-harvardit.log',
'formatter': 'verbose',
},
},
'loggers': {
'django': {
'handlers': ['logfile', 'console', 'mail_admins',],
'level': 'INFO',
'propagate': True,
},
'harvardit': {
'handlers': ['logfile', 'console', 'mail_admins',],
'level': 'DEBUG',
}
},
}
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/static-hyperledger_traceit/static'
LOGIN_URL = '/'
SC_ABI = '[{"constant":true,"inputs":[],"name":"subjectsCompleted","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"subject","type":"int8"},{"name":"theGrade","type":"int8"}],"name":"setGrade","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"degreeObtained","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"professorAddress","type":"address"}],"name":"setProfessor","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"thesis","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"professors","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"thesisHash","type":"bytes32"}],"name":"registerThesis","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"int8"}],"name":"grades","outputs":[{"name":"","type":"int8"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"}]'
SC_BYTECODE = '0x608060405234801561001057600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600160008060000b815260200190815260200160002060006101000a81548160ff021916908360000b60ff1602179055507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60016000600160000b815260200190815260200160002060006101000a81548160ff021916908360000b60ff1602179055507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60016000600260000b815260200190815260200160002060006101000a81548160ff021916908360000b60ff1602179055506000600360006101000a81548160ff0219169083151502179055506000600360016101000a81548160ff02191690831515021790555061066b8061018e6000396000f300608060405260043610610099576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063164afb271461009e57806322fecf09146100cd57806357244b471461010a57806375b1c531146101395780638da5cb5b1461017c578063a03e9608146101d3578063cd1e9e2b14610206578063d829bc4714610261578063de28b85814610292575b600080fd5b3480156100aa57600080fd5b506100b36102dc565b604051808215151515815260200191505060405180910390f35b3480156100d957600080fd5b50610108600480360381019080803560000b9060200190929190803560000b90602001909291905050506102ef565b005b34801561011657600080fd5b5061011f61038f565b604051808215151515815260200191505060405180910390f35b34801561014557600080fd5b5061017a600480360381019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506103a2565b005b34801561018857600080fd5b50610191610458565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156101df57600080fd5b506101e861047d565b60405180826000191660001916815260200191505060405180910390f35b34801561021257600080fd5b50610247600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610483565b604051808215151515815260200191505060405180910390f35b34801561026d57600080fd5b5061029060048036038101908080356000191690602001909291905050506104a3565b005b34801561029e57600080fd5b506102c0600480360381019080803560000b9060200190929190505050610549565b604051808260000b60000b815260200191505060405180910390f35b600360009054906101000a900460ff1681565b60011515600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff16151514151561034e57600080fd5b80600160008460000b60000b815260200190815260200160002060006101000a81548160ff021916908360000b60ff16021790555061038b610569565b5050565b600360019054906101000a900460ff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156103fd57600080fd5b6001600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff02191690831515021790555050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60045481565b60026020528060005260406000206000915054906101000a900460ff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156104fe57600080fd5b60011515600360009054906101000a900460ff16151514151561052057600080fd5b80600481600019169055506001600360016101000a81548160ff02191690831515021790555050565b6001602052806000526040600020600091
5054906101000a900460000b81565b6005600160008060000b815260200190815260200160002060009054906101000a900460000b60000b121580156105c85750600560016000600160000b815260200190815260200160002060009054906101000a900460000b60000b12155b80156105fc5750600560016000600260000b815260200190815260200160002060009054906101000a900460000b60000b12155b15610621576001600360006101000a81548160ff02191690831515021790555061063d565b6000600360006101000a81548160ff0219169083151502179055505b5600a165627a7a723058203a47828fe163ba7cf0f3f3228cc2c2c7982a5bd2f43d56303d0f13cfd822dc840029'
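The SC_ABI and SC_BYTECODE settings pair with the web3 entry in INSTALLED_APPS; a hedged sketch of how they could be consumed (web3.py v5-style method names, and the provider URL and account choice are assumptions, not project code):
import json
from django.conf import settings
from web3 import Web3
# Connect to a node (URL is an assumption) and build a contract factory from the stored ABI/bytecode.
w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
factory = w3.eth.contract(abi=json.loads(settings.SC_ABI), bytecode=settings.SC_BYTECODE)
# Deploy from the first unlocked account and bind a handle to the deployed address.
tx_hash = factory.constructor().transact({"from": w3.eth.accounts[0]})
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
degree = w3.eth.contract(address=receipt.contractAddress, abi=json.loads(settings.SC_ABI))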
| 57.486034
| 4,100
| 0.773372
|
6120bb666da2bcd0f2a9e6f393139807ea69b77c
| 6,159
|
py
|
Python
|
useintest/executables/builders.py
|
wtsi-hgi/startfortest
|
426343c0ff340d4d83575cdafe2c4184707e7693
|
[
"MIT"
] | 1
|
2019-06-18T20:56:42.000Z
|
2019-06-18T20:56:42.000Z
|
useintest/executables/builders.py
|
wtsi-hgi/useintest
|
426343c0ff340d4d83575cdafe2c4184707e7693
|
[
"MIT"
] | 3
|
2017-09-21T12:14:44.000Z
|
2018-02-19T11:18:47.000Z
|
useintest/executables/builders.py
|
wtsi-hgi/useintest
|
426343c0ff340d4d83575cdafe2c4184707e7693
|
[
"MIT"
] | null | null | null |
import argparse
import base64
import os
import sys
from copy import deepcopy
from typing import List, Iterable, Dict, Set, Callable, Any, Union
from dill import dill
from useintest.executables.common import CLI_ARGUMENTS
_PROJECT_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")
_ARGUMENTS_TO_MOUNT_SCRIPT = os.path.join(_PROJECT_DIRECTORY, "executables", "paths_to_mount.py")
class CommandsBuilder:
"""
Builds commands to run an executable in Docker.
"""
def __init__(self, executable: str=None, container: str=None, image: str=None, executable_arguments: List[str]=None,
get_path_arguments_to_mount: Callable[[List[Any]], Set[str]]=None,
ports: Dict[int, int]=None, mounts: Dict[str, Union[str, Set[str]]]=None,
variables: Iterable[str]=None, name: str=None, detached: bool=False, other_docker: str=""):
self.executable = executable
self.container = container
self.image = image
self.executable_arguments = executable_arguments
self.get_path_arguments_to_mount = get_path_arguments_to_mount \
if get_path_arguments_to_mount is not None else lambda arguments: set()
self.ports = ports if ports is not None else dict()
self.mounts = mounts if mounts is not None else dict()
self.variables = variables if variables is not None else {}
self.name = name
self.detached = detached
self.other_docker = other_docker
def build(self) -> str:
"""
Builds the commands.
        :return: the built command string.
"""
if self.container is None and self.image is None:
raise ValueError("Must define either the Docker image or container to run commands in")
if self.container is not None and self.image is not None:
raise ValueError("Cannot build Docker command to work in for both an image and a running container")
mounts = ""
for local_volume, container_volumes in self.mounts.items():
if not isinstance(container_volumes, set):
container_volumes = {container_volumes}
for container_volume in container_volumes:
mounts += "-v %s:%s " % (local_volume, container_volume)
ports = ""
for local_port, container_port in self.ports.items():
ports += "-p %d:%d" % (local_port, container_port)
variables = ""
for variable in self.variables:
variables += "-e %s " % variable
executable_arguments = " ".join(self.executable_arguments) if self.executable_arguments is not None else CLI_ARGUMENTS
if self.get_path_arguments_to_mount is not None:
serialised_arguments_parser = base64.b64encode(dill.dumps(self.get_path_arguments_to_mount)).decode("utf-8")
calculate_additional_mounts = ("""
$("%(python_interpreter)s" "%(python_arguments_script)s" "%(serialised_arguments_parser)s" %(cli_arguments)s)
""" % {
"python_interpreter": sys.executable,
"python_arguments_script": _ARGUMENTS_TO_MOUNT_SCRIPT,
"serialised_arguments_parser": serialised_arguments_parser,
"cli_arguments": executable_arguments
}).strip()
else:
calculate_additional_mounts = ""
return """
docker %(docker_noun)s -i \\
%(name)s \\
%(detached)s \\
%(mounts)s %(calculate_additional_mounts)s \\
%(ports)s \\
%(variables)s \\
%(other_docker)s \\
%(image_or_container)s \\
%(executable)s %(executable_arguments)s
""" % {
"calculate_additional_mounts": calculate_additional_mounts,
"name": "--name %s" % self.name if self.name is not None else "",
"detached": "-d" if self.detached else "",
"mounts": mounts,
"ports": ports,
"variables": variables,
"other_docker": self.other_docker,
"docker_noun": "run" if self.image is not None else "exec",
"image_or_container": self.image if self.image is not None else self.container,
"executable": self.executable,
"executable_arguments": executable_arguments
}
class MountedArgumentParserBuilder:
"""
    Builds a callable that extracts the path-valued CLI arguments that need to be mounted into the Docker container.
"""
ALL_POSITIONAL_ARGUMENTS = "*"
def __init__(self, named_arguments: Set[str]=None, positional_arguments: Union[Set[int], str]=None):
"""
        Constructor.
        :param named_arguments: names of CLI options (e.g. "--input") whose values are paths to mount
        :param positional_arguments: 1-based positions of positional arguments to mount, or `ALL_POSITIONAL_ARGUMENTS` for all of them
"""
self.named_arguments = named_arguments if named_arguments is not None else set()
self.positional_arguments = positional_arguments if positional_arguments is not None else set()
def build(self) -> Callable[[List[Any]], Set[str]]:
"""
        Builds the argument parser callable.
        :return: callable mapping a CLI argument list to the set of path arguments that should be mounted
"""
named_arguments = deepcopy(self.named_arguments)
positional_arguments = deepcopy(self.positional_arguments)
def get_mounts(cli_arguments: List[Any]) -> Set[str]:
parser = argparse.ArgumentParser()
for name in named_arguments:
parser.add_argument(name, type=str)
parser.add_argument("positional_arguments", type=str, nargs="*")
arguments, _ = parser.parse_known_args(cli_arguments)
mounts = set() # type: Set[str]
for name in named_arguments:
value = getattr(arguments, name.lstrip("-"), None)
if value is not None:
mounts.add(value)
if positional_arguments != MountedArgumentParserBuilder.ALL_POSITIONAL_ARGUMENTS:
for position in positional_arguments:
if position <= len(arguments.positional_arguments):
mounts.add(arguments.positional_arguments[position - 1])
else:
mounts = mounts.union(set(arguments.positional_arguments))
return mounts
return get_mounts
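A hedged usage sketch tying the two builders together; the image, executable and option names below are assumptions:
# Mount whatever path is passed as "--input" plus the first positional argument.
get_mounts = MountedArgumentParserBuilder(
    named_arguments={"--input"}, positional_arguments={1}).build()
builder = CommandsBuilder(
    executable="samtools",
    image="biocontainers/samtools",
    get_path_arguments_to_mount=get_mounts,
    mounts={"/tmp": "/tmp"})
print(builder.build())  # prints the assembled "docker run -i ..." shell snippet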
| 41.06
| 126
| 0.618282
|
4daa2236e2b03fccc0fb013b6f45ab3818a16341
| 1,443
|
py
|
Python
|
multiprocessingExercise/multiprocess_TCP_server.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
multiprocessingExercise/multiprocess_TCP_server.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
multiprocessingExercise/multiprocess_TCP_server.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
import time
import multiprocessing
import socket
class MultiProcessingSocketStreamServer(object):
def __init__(self,port,process):
self._serversocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self._serversocket.bind(('localhost',port))
self._serversocket.listen(5)
self.process=process
def _parent_main_loop(self):
while True:
time.sleep(1)
def start(self,handler):
for i in range(self.process):
p=multiprocessing.Process(target=handler,args=(self._serversocket, ))
p.daemon=True
p.start()
self._parent_main_loop()
class SocketStreamHandler(object):
def __init__(self):
self._sock=None
self._address=None
def __call__(self,serversocket):
while True:
(self._sock,self._address)=serversocket.accept()
with self:
                self.handle()
def __enter__(self):
pass
def __exit__(self,exc_type,exc_value,traceback):
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
def handle(self):
raise NotImplementedError
class HelloWorldHandler(SocketStreamHandler):
def handle(self):
        self._sock.send(b"Hello World\n")  # bytes, so this also runs on Python 3
def main():
print("Hello")
server=MultiProcessingSocketStreamServer(8080,5)
handler=HelloWorldHandler()
server.start(handler)
if __name__ == '__main__':
main()
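A minimal client sketch for exercising the server above (port 8080, as in main()):
import socket
# Connect, read the greeting sent by HelloWorldHandler, then close.
with socket.create_connection(("localhost", 8080)) as client:
    print(client.recv(1024).decode())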
| 25.315789
| 81
| 0.652114
|
9ef46c7cd523f2bc306f8fcae3ea66a8da8afe1b
| 2,333
|
py
|
Python
|
testing/l2/utils.py
|
milancermak/realms-contracts
|
73a6772b687bbe1775d104c1ca686ee8930ff250
|
[
"MIT"
] | 1
|
2022-01-06T05:44:44.000Z
|
2022-01-06T05:44:44.000Z
|
testing/l2/utils.py
|
milancermak/realms-contracts
|
73a6772b687bbe1775d104c1ca686ee8930ff250
|
[
"MIT"
] | null | null | null |
testing/l2/utils.py
|
milancermak/realms-contracts
|
73a6772b687bbe1775d104c1ca686ee8930ff250
|
[
"MIT"
] | null | null | null |
"""Utilities for testing Cairo contracts."""
from starkware.cairo.common.hash_state import compute_hash_on_elements
from starkware.crypto.signature.signature import private_to_stark_key, sign
from starkware.starknet.definitions.error_codes import StarknetErrorCode
from starkware.starkware_utils.error_handling import StarkException
from starkware.starknet.public.abi import get_selector_from_name
MAX_UINT256 = (2**128 - 1, 2**128 - 1)
def str_to_felt(text):
b_text = bytes(text, 'UTF-8')
return int.from_bytes(b_text, "big")
def uint(a):
return(a, 0)
async def assert_revert(fun):
try:
await fun
assert False
except StarkException as err:
_, error = err.args
assert error['code'] == StarknetErrorCode.TRANSACTION_FAILED
class Signer():
"""
Utility for sending signed transactions to an Account on Starknet.
Parameters
----------
private_key : int
Examples
---------
    Constructing a Signer object
>>> signer = Signer(1234)
Sending a transaction
>>> await signer.send_transaction(account,
account.contract_address,
'set_public_key',
[other.public_key]
)
"""
def __init__(self, private_key):
self.private_key = private_key
self.public_key = private_to_stark_key(private_key)
def sign(self, message_hash):
return sign(msg_hash=message_hash, priv_key=self.private_key)
async def send_transaction(self, account, to, selector_name, calldata, nonce=None):
if nonce is None:
execution_info = await account.get_nonce().call()
nonce, = execution_info.result
selector = get_selector_from_name(selector_name)
message_hash = hash_message(
account.contract_address, to, selector, calldata, nonce)
sig_r, sig_s = self.sign(message_hash)
return await account.execute(to, selector, calldata, nonce).invoke(signature=[sig_r, sig_s])
def hash_message(sender, to, selector, calldata, nonce):
message = [
sender,
to,
selector,
compute_hash_on_elements(calldata),
nonce
]
return compute_hash_on_elements(message)
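A small sketch of producing a signature by hand with the helpers above; the addresses, nonce and private key are placeholder values:
signer = Signer(private_key=123456789987654321)
selector = get_selector_from_name("set_public_key")
message_hash = hash_message(
    sender=0x123, to=0x456, selector=selector, calldata=[signer.public_key], nonce=0)
sig_r, sig_s = signer.sign(message_hash)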
| 27.77381
| 100
| 0.645092
|
efd58e33698bceaf7a4a244af07cde3e9073ebf4
| 2,054
|
py
|
Python
|
chapter7/services/load_csv_service.py
|
andreffs18/collective-intelligence
|
6bea822faa4ac6ae3df8843f8af2420a25d6da0f
|
[
"MIT"
] | 1
|
2020-07-29T09:10:48.000Z
|
2020-07-29T09:10:48.000Z
|
chapter7/services/load_csv_service.py
|
andreffs18/collective-intelligence
|
6bea822faa4ac6ae3df8843f8af2420a25d6da0f
|
[
"MIT"
] | 3
|
2020-03-30T20:12:58.000Z
|
2022-03-11T23:18:38.000Z
|
chapter7/services/load_csv_service.py
|
andreffs18/collective-intelligence
|
6bea822faa4ac6ae3df8843f8af2420a25d6da0f
|
[
"MIT"
] | null | null | null |
class LoadCSVService:
def __init__(self, standard=False, filename="data.csv"):
self.standard = standard
self.filename = filename
def call(self):
"""
        For this example's purposes, we return the book's example data instead of reading an actual file.
"""
if not self.standard:
return self._baseball()
# ['Referrer', 'Location', 'Read FAQ', 'Page Viewed', 'Pricing Plan']
return [
['slashdot', 'USA', 'yes', 18, 'None'],
['google', 'France', 'yes', 23, 'Premium'],
['digg', 'USA', 'yes', 24, 'Basic'],
['kiwitobes', 'France', 'yes', 23, 'Basic'],
['google', 'UK', 'no', 21, 'Premium'],
['(direct)', 'New Zealand', 'no', 12, 'None'],
['(direct)', 'UK', 'no', 21, 'Basic'],
['google', 'USA', 'no', 24, 'Premium'],
['slashdot', 'France', 'yes', 19, 'None'],
['digg', 'USA', 'no', 18, 'None'],
['google', 'UK', 'no', 18, 'None'],
['kiwitobes', 'UK', 'no', 19, 'None'],
['digg', 'New Zealand', 'yes', 12, 'Basic'],
['slashdot', 'UK', 'no', 21, 'None'],
['google', 'UK', 'yes', 18, 'Basic'],
['kiwitobes', 'France', 'yes', 19, 'Basic']
]
def _baseball(self):
"""
From https://github.com/jayelm/decisiontrees/blob/master/example_data/baseball.csv
"""
data = """Outlook,Temperature,Humidity,Wind,Play ball?
Sunny,Hot,High,Weak,No
Sunny,Hot,High,Strong,No
Overcast,Hot,High,Weak,Yes
Rain,Mild,High,Weak,Yes
Rain,Cool,Normal,Weak,Yes
Rain,Cool,Normal,Strong,No
Overcast,Cool,Normal,Strong,Yes
Sunny,Mild,High,Weak,No
Sunny,Cool,Normal,Weak,Yes
Rain,Mild,Normal,Weak,Yes
Sunny,Mild,Normal,Strong,Yes
Overcast,Mild,High,Strong,Yes
Overcast,Hot,Normal,Weak,Yes
Rain,Mild,High,Strong,No"""
        # clean data: drop empty lines, split each row on commas, and skip the header row
        data = data.split("\n")
        data = [row.strip().split(",") for row in data if row]
        return data[1:]
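Usage note: the default (standard=False) returns the baseball rows, while standard=True returns the referrer/pricing-plan rows defined above. For example:
baseball_rows = LoadCSVService().call()
referrer_rows = LoadCSVService(standard=True).call()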
| 36.035088
| 97
| 0.539435
|
7869e56db8e9e99a47f188ba0a74ad3f725f19e2
| 1,811
|
py
|
Python
|
tests/rekey.py
|
prakashngit/CCF
|
3ae6e3d4074f424a85ee062aa1ed584e7e17f72f
|
[
"Apache-2.0"
] | null | null | null |
tests/rekey.py
|
prakashngit/CCF
|
3ae6e3d4074f424a85ee062aa1ed584e7e17f72f
|
[
"Apache-2.0"
] | null | null | null |
tests/rekey.py
|
prakashngit/CCF
|
3ae6e3d4074f424a85ee062aa1ed584e7e17f72f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.ccf
import infra.jsonrpc
import infra.notification
import suite.test_requirements as reqs
import infra.e2e_args
import time
from loguru import logger as LOG
@reqs.description("Rekey the ledger once")
@reqs.supports_methods("mkSign")
@reqs.at_least_n_nodes(1)
def test(network, args):
primary, _ = network.find_primary()
# Retrieve current index version to check for sealed secrets later
with primary.node_client() as nc:
check_commit = infra.checker.Checker(nc)
res = nc.rpc("mkSign", params={})
check_commit(res, result=True)
version_before_rekey = res.commit
network.consortium.rekey_ledger(member_id=1, remote_node=primary)
network.wait_for_sealed_secrets_at_version(version_before_rekey)
return network
# Run some write transactions against the logging app
def record_transactions(primary, txs_count=1):
with primary.node_client() as nc:
check_commit = infra.checker.Checker(nc)
with primary.user_client() as c:
for i in range(1, txs_count):
check_commit(
c.rpc("LOG_record", {"id": i, "msg": f"entry #{i}"}), result=True
)
def run(args):
hosts = ["localhost", "localhost"]
with infra.ccf.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb,
) as network:
network.start_and_join(args)
primary, _ = network.find_primary()
record_transactions(primary)
test(network, args)
record_transactions(primary)
if __name__ == "__main__":
args = infra.e2e_args.cli_args()
args.package = args.app_script and "libluageneric" or "liblogging"
run(args)
| 28.746032
| 85
| 0.687465
|
a76968da99d789a1c033841a3a6cd3b4d0776af7
| 7,665
|
py
|
Python
|
networkx/release.py
|
nadesai/networkx
|
ca2df82141cf6977c9d59af2d0bfbc990e2aabce
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/release.py
|
nadesai/networkx
|
ca2df82141cf6977c9d59af2d0bfbc990e2aabce
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/release.py
|
nadesai/networkx
|
ca2df82141cf6977c9d59af2d0bfbc990e2aabce
|
[
"BSD-3-Clause"
] | null | null | null |
"""Release data for NetworkX.
When NetworkX is imported a number of steps are followed to determine
the version information.
1) If the release is not a development release (dev=False), then version
information is read from version.py, a file containing statically
defined version information. This file should exist on every
downloadable release of NetworkX since setup.py creates it during
packaging/installation. However, version.py might not exist if one
is running NetworkX from the mercurial repository. In the event that
version.py does not exist, then no vcs information will be available.
2) If the release is a development release, then version information
is read dynamically, when possible. If no dynamic information can be
read, then an attempt is made to read the information from version.py.
If version.py does not exist, then no vcs information will be available.
Clarification:
version.py is created only by setup.py
When setup.py creates version.py, it does so before packaging/installation.
So the created file is included in the source distribution. When a user
downloads a tar.gz file and extracts the files, the files will not be in a
live version control repository. So when the user runs setup.py to install
NetworkX, we must make sure write_versionfile() does not overwrite the
revision information contained in the version.py that was included in the
tar.gz file. This is why write_versionfile() includes an early escape.
"""
# Copyright (C) 2004-2017 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from __future__ import absolute_import
import os
import sys
import time
import datetime
basedir = os.path.abspath(os.path.split(__file__)[0])
def write_versionfile():
"""Creates a static file containing version information."""
versionfile = os.path.join(basedir, 'version.py')
text = '''"""
Version information for NetworkX, created during installation.
Do not add this file to the repository.
"""
import datetime
version = %(version)r
date = %(date)r
# Was NetworkX built from a development version? If so, remember that the major
# and minor versions reference the "target" (rather than "current") release.
dev = %(dev)r
# Format: (name, major, min, revision)
version_info = %(version_info)r
# Format: a 'datetime.datetime' instance
date_info = %(date_info)r
# Format: (vcs, vcs_tuple)
vcs_info = %(vcs_info)r
'''
# Try to update all information
date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
def writefile():
fh = open(versionfile, 'w')
subs = {
'dev': dev,
'version': version,
'version_info': version_info,
'date': date,
'date_info': date_info,
'vcs_info': vcs_info
}
fh.write(text % subs)
fh.close()
if vcs_info[0] == 'mercurial':
# Then, we want to update version.py.
writefile()
else:
if os.path.isfile(versionfile):
# This is *good*, and the most likely place users will be when
# running setup.py. We do not want to overwrite version.py.
# Grab the version so that setup can use it.
sys.path.insert(0, basedir)
from version import version
del sys.path[0]
else:
# This is *bad*. It means the user might have a tarball that
# does not include version.py. Let this error raise so we can
# fix the tarball.
##raise Exception('version.py not found!')
# We no longer require that prepared tarballs include a version.py
            # So we use the possibly truncated value from get_info()
# Then we write a new file.
writefile()
return version
def get_revision():
"""Returns revision and vcs information, dynamically obtained."""
vcs, revision, tag = None, None, None
gitdir = os.path.join(basedir, '..', '.git')
if os.path.isdir(gitdir):
vcs = 'git'
# For now, we are not bothering with revision and tag.
vcs_info = (vcs, (revision, tag))
return revision, vcs_info
def get_info(dynamic=True):
# Date information
date_info = datetime.datetime.now()
date = time.asctime(date_info.timetuple())
revision, version, version_info, vcs_info = None, None, None, None
import_failed = False
dynamic_failed = False
if dynamic:
revision, vcs_info = get_revision()
if revision is None:
dynamic_failed = True
if dynamic_failed or not dynamic:
# This is where most final releases of NetworkX will be.
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info, vcs_info
except ImportError:
import_failed = True
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
# we failed to determine static versioning info, or
# we successfully obtained dynamic revision info
version = ''.join([str(major), '.', str(minor)])
if dev:
version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
version_info = (name, major, minor, revision)
return date, date_info, version, version_info, vcs_info
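# Illustrative note (example values only, not produced by this file): with the
# settings below (name='networkx', major="2", minor="0", dev=True), get_info()
# builds a version string of the form "2.0.dev_YYYYMMDDHHMMSS"; with dev=False
# it would simply be "2.0".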
# Version information
name = 'networkx'
major = "2"
minor = "0"
# Declare current release as a development release.
# Change to False before tagging a release; then change back.
dev = True
description = "Python package for creating and manipulating graphs and networks"
long_description = \
"""
NetworkX is a Python package for the creation, manipulation, and
study of the structure, dynamics, and functions of complex networks.
"""
license = 'BSD'
authors = {'Hagberg': ('Aric Hagberg', 'hagberg@lanl.gov'),
'Schult': ('Dan Schult', 'dschult@colgate.edu'),
'Swart': ('Pieter Swart', 'swart@lanl.gov')
}
maintainer = "NetworkX Developers"
maintainer_email = "networkx-discuss@googlegroups.com"
url = 'http://networkx.github.io/'
download_url = 'https://pypi.python.org/pypi/networkx/'
platforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']
keywords = ['Networks', 'Graph Theory', 'Mathematics',
'network', 'graph', 'discrete mathematics', 'math']
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics']
date, date_info, version, version_info, vcs_info = get_info()
if __name__ == '__main__':
# Write versionfile for nightly snapshots.
write_versionfile()
| 33.181818
| 80
| 0.662883
|
a3d3ccab3479e41072a9c6e915e8e5d7e0fd3689
| 1,645
|
py
|
Python
|
ebnf/primitives.py
|
shixiongfei/ebnf
|
40f9055d240c741a248a2f797109c2b08a8c46c8
|
[
"Apache-2.0"
] | 1
|
2021-02-02T11:38:06.000Z
|
2021-02-02T11:38:06.000Z
|
ebnf/primitives.py
|
shixiongfei/ebnf
|
40f9055d240c741a248a2f797109c2b08a8c46c8
|
[
"Apache-2.0"
] | null | null | null |
ebnf/primitives.py
|
shixiongfei/ebnf
|
40f9055d240c741a248a2f797109c2b08a8c46c8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
class Statement:
def asString(self):
raise Exception("Missing `asString` method")
def __repr__(self):
return self.asString()
def __str__(self):
return self.asString()
class Optional(Statement):
def __init__(self, token):
super().__init__()
self.token = token
def asString(self):
return "Optional({0})".format(self.token.asString())
class Repetition(Statement):
def __init__(self, token):
super().__init__()
self.token = token
def asString(self):
return "Repetition({0})".format(self.token.asString())
class Grouping(Statement):
def __init__(self, token):
super().__init__()
self.token = token
def asString(self):
return "Grouping({0})".format(self.token.asString())
class Alternation(Statement):
def __init__(self, lhs, rhs):
super().__init__()
self.lhs = lhs
self.rhs = rhs
def asString(self):
return "Alternation({0}, {1})".format(
self.lhs.asString(), self.rhs.asString()
)
class Concatenation(Statement):
def __init__(self, lhs, rhs):
super().__init__()
self.lhs = lhs
self.rhs = rhs
def asString(self):
return "Concatenation({0}, {1})".format(
self.lhs.asString(), self.rhs.asString()
)
class Rule(Statement):
def __init__(self, rule, stmt, *args, **kwargs):
self.rule = rule
self.stmt = stmt
def asString(self):
return "Rule({0} = {1})".format(
self.rule.asString(), self.stmt.asString()
)
| 21.933333
| 62
| 0.578116
|
d85738012dd2e98d4a9e2b48ea553bbba4289afd
| 8,007
|
py
|
Python
|
pymc3/tests/test_diagnostics.py
|
vpolisky/pymc3
|
87cdd712c86321121c2ed3150764f3d847f5083c
|
[
"Apache-2.0"
] | null | null | null |
pymc3/tests/test_diagnostics.py
|
vpolisky/pymc3
|
87cdd712c86321121c2ed3150764f3d847f5083c
|
[
"Apache-2.0"
] | null | null | null |
pymc3/tests/test_diagnostics.py
|
vpolisky/pymc3
|
87cdd712c86321121c2ed3150764f3d847f5083c
|
[
"Apache-2.0"
] | 1
|
2021-04-13T10:31:26.000Z
|
2021-04-13T10:31:26.000Z
|
import numpy as np
from numpy.testing import assert_allclose, assert_array_less
from .helpers import SeededTest
from ..model import Model
from ..step_methods import Slice, Metropolis, NUTS
from ..distributions import Normal
from ..tuning import find_MAP
from ..sampling import sample
from ..diagnostics import effective_n, geweke, gelman_rubin
from .test_examples import build_disaster_model
class TestGelmanRubin(SeededTest):
good_ratio = 1.1
def get_ptrace(self, n_samples):
model = build_disaster_model()
with model:
# Run sampler
step1 = Slice([model.early_mean_log_, model.late_mean_log_])
step2 = Metropolis([model.switchpoint])
start = {'early_mean': 7., 'late_mean': 1., 'switchpoint': 100}
ptrace = sample(n_samples, step=[step1, step2], start=start, njobs=2,
progressbar=False, random_seed=[20090425, 19700903])
return ptrace
def test_good(self):
"""Confirm Gelman-Rubin statistic is close to 1 for a reasonable number of samples."""
n_samples = 1000
rhat = gelman_rubin(self.get_ptrace(n_samples))
self.assertTrue(all(1 / self.good_ratio < r <
self.good_ratio for r in rhat.values()))
def test_bad(self):
"""Confirm Gelman-Rubin statistic is far from 1 for a small number of samples."""
n_samples = 10
rhat = gelman_rubin(self.get_ptrace(n_samples))
self.assertFalse(all(1 / self.good_ratio < r <
self.good_ratio for r in rhat.values()))
def test_right_shape_python_float(self, shape=None, test_shape=None):
"""Check Gelman-Rubin statistic shape is correct w/ python float"""
n_jobs = 3
n_samples = 10
with Model():
if shape is not None:
Normal('x', 0, 1., shape=shape)
else:
Normal('x', 0, 1.)
# start sampling at the MAP
start = find_MAP()
step = NUTS(scaling=start)
ptrace = sample(n_samples, step=step, start=start,
njobs=n_jobs, random_seed=42)
rhat = gelman_rubin(ptrace)['x']
if test_shape is None:
test_shape = shape
if shape is None or shape == ():
self.assertTrue(isinstance(rhat, float))
else:
self.assertTrue(isinstance(rhat, np.ndarray))
self.assertEqual(rhat.shape, test_shape)
def test_right_shape_scalar_tuple(self):
"""Check Gelman-Rubin statistic shape is correct w/ scalar as shape=()"""
self.test_right_shape_python_float(shape=())
def test_right_shape_tensor(self, shape=(5, 3, 2), test_shape=None):
"""Check Gelman-Rubin statistic shape is correct w/ tensor variable"""
self.test_right_shape_python_float(shape=(5, 3, 2))
def test_right_shape_scalar_array(self):
"""Check Gelman-Rubin statistic shape is correct w/ scalar as shape=(1,)"""
self.test_right_shape_python_float(shape=(1,))
def test_right_shape_scalar_one(self):
"""Check Gelman-Rubin statistic shape is correct w/ scalar as shape=1"""
self.test_right_shape_python_float(shape=1, test_shape=(1,))
class TestDiagnostics(SeededTest):
def get_switchpoint(self, n_samples):
model = build_disaster_model()
with model:
# Run sampler
step1 = Slice([model.early_mean_log_, model.late_mean_log_])
step2 = Metropolis([model.switchpoint])
trace = sample(n_samples, step=[step1, step2], progressbar=False, random_seed=1)
return trace['switchpoint']
def test_geweke_negative(self):
"""Confirm Geweke diagnostic is larger than 1 for a small number of samples."""
n_samples = 200
n_intervals = 20
switchpoint = self.get_switchpoint(n_samples)
first = 0.1
last = 0.7
        # returns an (intervals x 2) matrix: the first column holds start
        # indexes, the second column holds z-scores
z_switch = geweke(switchpoint, first=first,
last=last, intervals=n_intervals)
# These z-scores should be larger, since there are not many samples.
self.assertGreater(max(abs(z_switch[:, 1])), 1)
def test_geweke_positive(self):
"""Confirm Geweke diagnostic is smaller than 1 for a reasonable number of samples."""
n_samples = 2000
n_intervals = 20
switchpoint = self.get_switchpoint(n_samples)
with self.assertRaises(ValueError):
# first and last must be between 0 and 1
geweke(switchpoint, first=-0.3, last=1.1, intervals=n_intervals)
with self.assertRaises(ValueError):
# first and last must add to < 1
geweke(switchpoint, first=0.3, last=0.7, intervals=n_intervals)
first = 0.1
last = 0.7
        # returns an (intervals x 2) matrix: the first column holds start
        # indexes, the second column holds z-scores
z_switch = geweke(switchpoint, first=first,
last=last, intervals=n_intervals)
start = z_switch[:, 0]
z_scores = z_switch[:, 1]
# Ensure `intervals` argument is honored
self.assertEqual(z_switch.shape[0], n_intervals)
# Start index should not be in the last <last>% of samples
assert_array_less(start, (1 - last) * n_samples)
# These z-scores should be small, since there are more samples.
self.assertLess(max(abs(z_scores)), 1)
def test_effective_n(self):
"""Check effective sample size is equal to number of samples when initializing with MAP"""
n_jobs = 3
n_samples = 100
with Model():
Normal('x', 0, 1., shape=5)
# start sampling at the MAP
start = find_MAP()
step = NUTS(scaling=start)
ptrace = sample(n_samples, step=step, start=start,
njobs=n_jobs, random_seed=42)
n_effective = effective_n(ptrace)['x']
assert_allclose(n_effective, n_jobs * n_samples, 2)
def test_effective_n_right_shape_python_float(self,
shape=None, test_shape=None):
"""Check effective sample size shape is correct w/ python float"""
n_jobs = 3
n_samples = 10
with Model():
if shape is not None:
Normal('x', 0, 1., shape=shape)
else:
Normal('x', 0, 1.)
# start sampling at the MAP
start = find_MAP()
step = NUTS(scaling=start)
ptrace = sample(n_samples, step=step, start=start,
njobs=n_jobs, random_seed=42)
n_effective = effective_n(ptrace)['x']
if test_shape is None:
test_shape = shape
if shape is None or shape == ():
self.assertTrue(isinstance(n_effective, float))
else:
self.assertTrue(isinstance(n_effective, np.ndarray))
self.assertEqual(n_effective.shape, test_shape)
def test_effective_n_right_shape_scalar_tuple(self):
"""Check effective sample size shape is correct w/ scalar as shape=()"""
self.test_effective_n_right_shape_python_float(shape=())
def test_effective_n_right_shape_tensor(self):
"""Check effective sample size shape is correct w/ tensor variable"""
self.test_effective_n_right_shape_python_float(shape=(5, 3, 2))
def test_effective_n_right_shape_scalar_array(self):
"""Check effective sample size shape is correct w/ scalar as shape=(1,)"""
self.test_effective_n_right_shape_python_float(shape=(1,))
def test_effective_n_right_shape_scalar_one(self):
"""Check effective sample size shape is correct w/ scalar as shape=1"""
self.test_effective_n_right_shape_python_float(shape=1,
test_shape=(1,))
| 38.681159
| 98
| 0.618084
|
3930f53c3d0d9dd3ab97c09e39f1837b53eda821
| 13,637
|
py
|
Python
|
adaptive_lr_decay/adaptive_fed_avg_test.py
|
Abhin02/federated
|
5fd8f69284c2784b635faadfaf6c66ce843f7701
|
[
"Apache-2.0"
] | 1
|
2022-03-16T02:13:39.000Z
|
2022-03-16T02:13:39.000Z
|
adaptive_lr_decay/adaptive_fed_avg_test.py
|
notminusone/federated
|
6a709f5598450232b918c046cfeba849f479d5cb
|
[
"Apache-2.0"
] | null | null | null |
adaptive_lr_decay/adaptive_fed_avg_test.py
|
notminusone/federated
|
6a709f5598450232b918c046cfeba849f479d5cb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for federated averaging with learning rate callbacks."""
import collections
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from adaptive_lr_decay import adaptive_fed_avg
from adaptive_lr_decay import callbacks
def _create_client_data(num_batches=2):
# Create data for y = 3 * x + 1
x = [[0.0], [1.0]]
y = [[1.0], [4.0]]
# Create a dataset of 4 examples (2 batches of two examples).
return tf.data.Dataset.from_tensor_slices(collections.OrderedDict(
x=x, y=y)).repeat().batch(2).take(num_batches)
def _uncompiled_model_builder():
keras_model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(1,)),
tf.keras.layers.Dense(
units=1, kernel_initializer='zeros', bias_initializer='zeros')
])
loss_fn = tf.keras.losses.MeanSquaredError()
input_spec = _create_client_data().element_spec
return tff.learning.from_keras_model(
keras_model=keras_model, input_spec=input_spec, loss=loss_fn)
class AdaptiveFedAvgTest(tf.test.TestCase):
def _run_rounds(self, iterative_process, num_rounds):
client_datasets = [
_create_client_data(num_batches=1),
_create_client_data(num_batches=2)
]
train_outputs = []
state = iterative_process.initialize()
for round_num in range(num_rounds):
state, metrics = iterative_process.next(state, client_datasets)
train_outputs.append(metrics)
logging.info('Round %d: %s', round_num, metrics)
logging.info('Model: %s', state.model)
return state, train_outputs
def _run_rounds_tff_fedavg(self, iterative_process, num_rounds):
client_datasets = [
_create_client_data(num_batches=1),
_create_client_data(num_batches=2)
]
train_outputs = []
state = iterative_process.initialize()
for round_num in range(num_rounds):
state, outputs = iterative_process.next(state, client_datasets)
logging.info('Round %d: %s', round_num, outputs)
logging.info('Model: %s', state.model)
train_outputs.append(outputs['train'])
return state, train_outputs
def test_comparable_to_fed_avg(self):
client_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
iterative_process = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
reference_iterative_process = tff.learning.build_federated_averaging_process(
_uncompiled_model_builder,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.1),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(1.0))
_, train_outputs = self._run_rounds(iterative_process, 5)
_, reference_train_outputs = self._run_rounds_tff_fedavg(
reference_iterative_process, 5)
for i in range(5):
self.assertAllClose(train_outputs[i]['during_training']['loss'],
reference_train_outputs[i]['loss'], 1e-4)
def test_fed_avg_without_decay_decreases_loss(self):
client_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
iterative_process = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
state, train_outputs = self._run_rounds(iterative_process, 5)
self.assertLess(train_outputs[-1]['before_training']['loss'],
train_outputs[0]['before_training']['loss'])
self.assertLess(train_outputs[-1]['during_training']['loss'],
train_outputs[0]['during_training']['loss'])
self.assertNear(state.client_lr_callback.learning_rate, 0.1, 1e-8)
self.assertNear(state.server_lr_callback.learning_rate, 0.1, 1e-8)
def test_fed_avg_with_client_decay_decreases_loss(self):
client_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
window_size=1,
min_delta=0.5,
min_lr=0.05,
decay_factor=0.5,
patience=1,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1, window_size=1, decay_factor=1.0, cooldown=0)
iterative_process = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
state, train_outputs = self._run_rounds(iterative_process, 10)
self.assertLess(train_outputs[-1]['before_training']['loss'],
train_outputs[0]['before_training']['loss'])
self.assertLess(train_outputs[-1]['during_training']['loss'],
train_outputs[0]['during_training']['loss'])
self.assertNear(state.client_lr_callback.learning_rate, 0.05, 1e-8)
self.assertNear(state.server_lr_callback.learning_rate, 0.1, 1e-8)
def test_fed_avg_with_server_decay_decreases_loss(self):
client_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
window_size=1,
patience=1,
decay_factor=1.0,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
window_size=1,
patience=1,
decay_factor=0.5,
min_delta=0.5,
min_lr=0.05,
cooldown=0)
iterative_process = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
state, train_outputs = self._run_rounds(iterative_process, 10)
self.assertLess(train_outputs[-1]['before_training']['loss'],
train_outputs[0]['before_training']['loss'])
self.assertLess(train_outputs[-1]['during_training']['loss'],
train_outputs[0]['during_training']['loss'])
self.assertNear(state.client_lr_callback.learning_rate, 0.1, 1e-8)
self.assertNear(state.server_lr_callback.learning_rate, 0.05, 1e-8)
def test_fed_sgd_without_decay_decreases_loss(self):
client_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.0,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
iterative_process = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
state, train_outputs = self._run_rounds(iterative_process, 5)
self.assertLess(train_outputs[-1]['before_training']['loss'],
train_outputs[0]['before_training']['loss'])
self.assertLess(train_outputs[-1]['during_training']['loss'],
train_outputs[0]['during_training']['loss'])
self.assertNear(state.client_lr_callback.learning_rate, 0.0, 1e-8)
self.assertNear(state.server_lr_callback.learning_rate, 0.1, 1e-8)
def test_small_lr_comparable_zero_lr(self):
client_lr_callback1 = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.0,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
client_lr_callback2 = callbacks.create_reduce_lr_on_plateau(
learning_rate=1e-8,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
iterative_process1 = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback1,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
iterative_process2 = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback2,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
state1, train_outputs1 = self._run_rounds(iterative_process1, 5)
state2, train_outputs2 = self._run_rounds(iterative_process2, 5)
self.assertAllClose(state1.model.trainable, state2.model.trainable, 1e-4)
self.assertAllClose(train_outputs1, train_outputs2, 1e-4)
def test_iterative_process_type_signature(self):
client_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
min_delta=0.5,
window_size=2,
decay_factor=1.0,
cooldown=0)
iterative_process = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
lr_callback_type = tff.framework.type_from_tensors(client_lr_callback)
server_state_type = tff.FederatedType(
adaptive_fed_avg.ServerState(
model=tff.learning.ModelWeights(
trainable=(tff.TensorType(tf.float32, [1, 1]),
tff.TensorType(tf.float32, [1])),
non_trainable=()),
optimizer_state=[tf.int64],
client_lr_callback=lr_callback_type,
server_lr_callback=lr_callback_type), tff.SERVER)
self.assertEqual(iterative_process.initialize.type_signature,
tff.FunctionType(parameter=None, result=server_state_type))
dataset_type = tff.FederatedType(
tff.SequenceType(
collections.OrderedDict(
x=tff.TensorType(tf.float32, [None, 1]),
y=tff.TensorType(tf.float32, [None, 1]))), tff.CLIENTS)
metrics_type = tff.FederatedType(
collections.OrderedDict(
loss=tff.TensorType(tf.float32),
num_examples=tff.TensorType(tf.int64),
num_batches=tff.TensorType(tf.int64)), tff.SERVER)
output_type = collections.OrderedDict(
before_training=metrics_type, during_training=metrics_type)
expected_result_type = (server_state_type, output_type)
expected_type = tff.FunctionType(
parameter=collections.OrderedDict(
server_state=server_state_type, federated_dataset=dataset_type),
result=expected_result_type)
actual_type = iterative_process.next.type_signature
self.assertEqual(
actual_type,
expected_type,
msg='{s}\n!={t}'.format(s=actual_type, t=expected_type))
def test_get_model_weights(self):
client_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
window_size=1,
patience=1,
decay_factor=1.0,
cooldown=0)
server_lr_callback = callbacks.create_reduce_lr_on_plateau(
learning_rate=0.1,
window_size=1,
patience=1,
decay_factor=1.0,
cooldown=0)
iterative_process = adaptive_fed_avg.build_fed_avg_process(
_uncompiled_model_builder,
client_lr_callback,
server_lr_callback,
client_optimizer_fn=tf.keras.optimizers.SGD,
server_optimizer_fn=tf.keras.optimizers.SGD)
state = iterative_process.initialize()
self.assertIsInstance(
iterative_process.get_model_weights(state), tff.learning.ModelWeights)
self.assertAllClose(state.model.trainable,
iterative_process.get_model_weights(state).trainable)
state, _ = self._run_rounds(iterative_process, 5)
self.assertIsInstance(
iterative_process.get_model_weights(state), tff.learning.ModelWeights)
self.assertAllClose(state.model.trainable,
iterative_process.get_model_weights(state).trainable)
if __name__ == '__main__':
tf.test.main()
| 36.95664
| 81
| 0.694948
|
bee96d2e8c0cf7b73f3866f1f2b50d8c6a1096b8
| 11,752
|
py
|
Python
|
pyopenvidu/openvidusession.py
|
PerchLive/pyopenvidu
|
fa8ac55b446c94a026dbd73d67cc090810a8c3f1
|
[
"MIT"
] | null | null | null |
pyopenvidu/openvidusession.py
|
PerchLive/pyopenvidu
|
fa8ac55b446c94a026dbd73d67cc090810a8c3f1
|
[
"MIT"
] | null | null | null |
pyopenvidu/openvidusession.py
|
PerchLive/pyopenvidu
|
fa8ac55b446c94a026dbd73d67cc090810a8c3f1
|
[
"MIT"
] | null | null | null |
"""OpenViduSession class."""
from typing import Iterator, List
from datetime import datetime
from threading import RLock
from requests_toolbelt.sessions import BaseUrlSession
from .exceptions import OpenViduSessionDoesNotExistsError, OpenViduConnectionDoesNotExistsError, OpenViduError
from .openviduconnection import OpenViduConnection
class OpenViduSession(object):
"""
This object represents an OpenVidu Session.
    A session is a group of users communicating with each other.
"""
def __init__(self, session: BaseUrlSession, lock: RLock, data: dict):
"""
This is meant for internal use, thus you should not call it.
Use `OpenVidu.get_session` to get an instance of this class.
"""
self._session = session
self._data = data
self._lock = lock # Sadly using this lock locks all other session objects as well
def fetch(self):
"""
        Updates every property of the OpenViduSession with the current status it has in OpenVidu Server. This is especially useful for getting the list of active connections to the OpenViduSession (the `connections` property).
        To update every OpenViduSession object owned by an OpenVidu object, call OpenVidu.fetch()
:return: True if the OpenViduSession status has changed with respect to the server, False if not. This applies to any property or sub-property of the object
"""
with self._lock:
r = self._session.get(f"api/sessions/{self.id}")
if r.status_code == 404:
self._data = {}
raise OpenViduSessionDoesNotExistsError()
r.raise_for_status()
is_changed = self._data != r.json()
if is_changed:
self._data = r.json()
return is_changed
def close(self):
"""
Gracefully closes the Session: unpublishes all streams and evicts every participant.
Further calls to this object will fail.
"""
with self._lock:
r = self._session.delete(f"/api/sessions/{self.id}")
if r.status_code == 404:
self._data = {}
raise OpenViduSessionDoesNotExistsError()
r.raise_for_status()
self._data = {}
@property
def is_valid(self) -> bool:
"""
        Checks whether this session still existed on the server as of the last call to fetch().
:return: True if the session exists. False otherwise.
"""
with self._lock:
return bool(self._data)
def generate_token(self, role: str = 'PUBLISHER', data: str = None, video_max_recv_bandwidth: int = None,
video_min_recv_bandwidth: int = None, video_max_send_bandwidth: int = None,
video_min_send_bandwidth: int = None, allowed_filters: list = None) -> str:
"""
        Gets a new token associated with this Session.
        In the video bandwidth settings 0 means unconstrained. Setting any of them (other than None) overrides the values configured for the server.
https://docs.openvidu.io/en/2.12.0/reference-docs/REST-API/#post-apitokens
:param role: Allowed values: `SUBSCRIBER`, `PUBLISHER` or `MODERATOR`
        :param data: metadata associated with this token (usually participant's information)
:param video_max_recv_bandwidth: Maximum number of Kbps that the client owning the token will be able to receive from Kurento Media Server.
:param video_min_recv_bandwidth: Minimum number of Kbps that the client owning the token will try to receive from Kurento Media Server.
:param video_max_send_bandwidth: Maximum number of Kbps that the client owning the token will be able to send to Kurento Media Server.
:param video_min_send_bandwidth: Minimum number of Kbps that the client owning the token will try to send to Kurento Media Server.
:param allowed_filters: Array of strings containing the names of the filters the user owning the token will be able to apply.
:return: The token as String.
"""
with self._lock:
if not self._data: # Fail early... and always
raise OpenViduSessionDoesNotExistsError()
# Prepare parameters
if role not in ['SUBSCRIBER', 'PUBLISHER', 'MODERATOR']:
raise ValueError(f"Role must be any of SUBSCRIBER, PUBLISHER or MODERATOR, not {role}")
parameters = {"session": self.id, "role": role}
if data:
parameters['data'] = data
kurento_options = {
"videoMaxRecvBandwidth": video_max_recv_bandwidth,
"videoMinRecvBandwidth": video_min_recv_bandwidth,
"videoMaxSendBandwidth": video_max_send_bandwidth,
"videoMinSendBandwidth": video_min_send_bandwidth,
"allowedFilters": allowed_filters
}
kurento_options = {k: v for k, v in kurento_options.items() if v is not None}
if kurento_options:
parameters['kurentoOptions'] = kurento_options
# send request
r = self._session.post('api/tokens', json=parameters)
if r.status_code == 404:
raise OpenViduSessionDoesNotExistsError()
elif r.status_code == 400:
raise ValueError()
return r.json()['token']
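    # A minimal usage sketch (hypothetical values, for illustration only):
    # assuming `session` is an OpenViduSession instance, a publisher token
    # could be requested like this:
    #
    #   token = session.generate_token(role='PUBLISHER', data='participant-1')
    #
    # Internally this maps to a POST against the server's api/tokens endpoint.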
@property
def connections(self) -> Iterator[OpenViduConnection]:
"""
Returns the list of active connections to the session.
:return: A generator for OpenViduConnection objects.
"""
with self._lock:
if not self._data:
raise OpenViduSessionDoesNotExistsError()
for connection_info in self._data['connections']['content']:
yield OpenViduConnection(self._session, self.id, connection_info)
def get_connection(self, connection_id: str) -> OpenViduConnection:
"""
Get a currently active connection to the server.
:param connection_id: Connection id.
        :return: An OpenViduConnection object.
"""
with self._lock:
if not self._data:
raise OpenViduSessionDoesNotExistsError()
for connection_info in self._data['connections']['content']:
if connection_info['connectionId'] == connection_id:
return OpenViduConnection(self._session, self.id, connection_info)
raise OpenViduConnectionDoesNotExistsError()
def signal(self, type_: str = None, data: str = None, to: List[OpenViduConnection] = None):
"""
Sends a signal to all participants in the session or specific connections if the `to` property defined.
OpenViduConnection objects also implement this method.
https://docs.openvidu.io/en/2.12.0/reference-docs/REST-API/#post-apisignal
        :param type_: Type of the signal. For example, with type 'MY_TYPE', only users subscribed to Session.on('signal:MY_TYPE') will trigger that signal. Users subscribed to Session.on('signal') will trigger signals of any type.
:param data: Actual data of the signal.
:param to: List of OpenViduConnection objects to which you want to send the signal. If this property is not set (None) the signal will be sent to all participants of the session.
"""
with self._lock:
if not self._data: # Fail early... and always
raise OpenViduSessionDoesNotExistsError()
if to:
recipient_list = [connection.id for connection in to]
else:
recipient_list = None
parameters = {
"session": self.id,
"to": recipient_list,
"type": type_,
"data": data
}
parameters = {k: v for k, v in parameters.items() if v is not None}
# send request
r = self._session.post('api/signal', json=parameters)
if r.status_code == 404:
raise OpenViduSessionDoesNotExistsError()
elif r.status_code == 400:
raise ValueError()
elif r.status_code == 406:
raise OpenViduConnectionDoesNotExistsError()
r.raise_for_status()
def publish(self, rtsp_uri: str, data: str = '', adaptive_bitrate: bool = True,
only_play_with_subscribers: bool = True, type_: str = "IPCAM") -> OpenViduConnection:
"""
Publishes a new IPCAM rtsp stream to the session.
Unlike `OpenVidu.create_session` this method does not call fetch() automatically, since the server returns enough data to construct the connection object.
        Keep in mind that if you want the newly created Connection to appear in the `connections` list, you should call fetch() before accessing the list!
https://docs.openvidu.io/en/2.12.0/reference-docs/REST-API/#post-apisessionsltsession_idgtconnection
:param rtsp_uri: RTSP URI of the IP camera. For example: `rtsp://your.camera.ip:7777/path`.
:param data: Metadata you want to associate to the camera's participant.
:param adaptive_bitrate: Whether to use adaptive bitrate or not.
:param only_play_with_subscribers: Enable the IP camera stream only when some user is subscribed to it.
:param type_: Which type of stream will be published. Defaults to `IPCAM`.
:return: An OpenVidu connection object represents the newly created connection.
"""
with self._lock:
if not self._data: # Fail early... and always
raise OpenViduSessionDoesNotExistsError()
parameters = {
"type": type_,
"rtspUri": rtsp_uri,
"adaptativeBitrate": adaptive_bitrate,
"onlyPlayWithSubscribers": only_play_with_subscribers,
"data": data
}
r = self._session.post(f'api/sessions/{self.id}/connection', json=parameters)
if r.status_code == 404:
raise OpenViduSessionDoesNotExistsError()
elif r.status_code == 400:
raise ValueError()
elif r.status_code == 500:
raise OpenViduError(r.content)
return OpenViduConnection(self._session, self.id, r.json())
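    # A minimal usage sketch (hypothetical camera name, for illustration only),
    # reusing the example URI from the docstring above:
    #
    #   connection = session.publish('rtsp://your.camera.ip:7777/path',
    #                                data='lobby-cam')
    #
    # Internally this maps to a POST against api/sessions/<session_id>/connection.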
@property
def connection_count(self) -> int:
"""
Get the number of active connections to the session.
:return: The number of active connections.
"""
with self._lock:
if not self._data:
raise OpenViduSessionDoesNotExistsError()
return self._data['connections']['numberOfElements']
@property
def id(self) -> str:
"""
:return: The ID of this session.
"""
with self._lock:
if not self._data:
raise OpenViduSessionDoesNotExistsError()
return self._data['sessionId']
@property
def created_at(self) -> datetime:
"""
:return: datetime object when the session was created in UTC time.
"""
with self._lock:
return datetime.utcfromtimestamp(self._data['createdAt'] / 1000.0)
@property
def is_being_recorded(self) -> bool:
"""
:return: True if the session is being recorded. False otherwise.
"""
with self._lock:
return self._data['recording']
@property
def media_mode(self) -> str:
"""
:return: Media mode configured for the session ('ROUTED' or 'RELAYED').
"""
with self._lock:
return self._data['mediaMode']
| 40.38488
| 236
| 0.626617
|
023055b09f366f541a2ec3b1fc2864d5c66a11db
| 12,601
|
py
|
Python
|
nova/tests/unit/db/test_sqlalchemy_migration.py
|
larsbutler/nova
|
fb190f30a911658d8b0c4deaf43cbb8c9e35b672
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/db/test_sqlalchemy_migration.py
|
larsbutler/nova
|
fb190f30a911658d8b0c4deaf43cbb8c9e35b672
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/db/test_sqlalchemy_migration.py
|
larsbutler/nova
|
fb190f30a911658d8b0c4deaf43cbb8c9e35b672
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import mock
import uuid
from migrate import exceptions as versioning_exceptions
from migrate import UniqueConstraint
from migrate.versioning import api as versioning_api
from oslo_db.sqlalchemy import utils as db_utils
import sqlalchemy
from nova import context
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import migration
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
class TestNullInstanceUuidScanDB(test.TestCase):
# NOTE(mriedem): Copied from the 267 database migration.
def downgrade(self, migrate_engine):
UniqueConstraint('uuid',
table=db_utils.get_table(migrate_engine, 'instances'),
name='uniq_instances0uuid').drop()
for table_name in ('instances', 'shadow_instances'):
table = db_utils.get_table(migrate_engine, table_name)
table.columns.uuid.alter(nullable=True)
def setUp(self):
super(TestNullInstanceUuidScanDB, self).setUp()
self.engine = db_api.get_engine()
# When this test runs, we've already run the schema migration to make
# instances.uuid non-nullable, so we have to alter the table here
# so we can test against a real database.
self.downgrade(self.engine)
# Now create fake entries in the fixed_ips, consoles and
        # instances tables where (instance_)uuid is None for testing.
for table_name in ('fixed_ips', 'instances', 'consoles'):
table = db_utils.get_table(self.engine, table_name)
fake_record = {'id': 1}
table.insert().execute(fake_record)
def test_db_null_instance_uuid_scan_readonly(self):
results = migration.db_null_instance_uuid_scan(delete=False)
self.assertEqual(1, results.get('instances'))
self.assertEqual(1, results.get('consoles'))
# The fixed_ips table should be ignored.
self.assertNotIn('fixed_ips', results)
# Now pick a random table with an instance_uuid column and show it's
# in the results but with 0 hits.
self.assertEqual(0, results.get('instance_info_caches'))
# Make sure nothing was deleted.
for table_name in ('fixed_ips', 'instances', 'consoles'):
table = db_utils.get_table(self.engine, table_name)
record = table.select(table.c.id == 1).execute().first()
self.assertIsNotNone(record)
def test_db_null_instance_uuid_scan_delete(self):
results = migration.db_null_instance_uuid_scan(delete=True)
self.assertEqual(1, results.get('instances'))
self.assertEqual(1, results.get('consoles'))
# The fixed_ips table should be ignored.
self.assertNotIn('fixed_ips', results)
# Now pick a random table with an instance_uuid column and show it's
# in the results but with 0 hits.
self.assertEqual(0, results.get('instance_info_caches'))
        # Make sure fixed_ips wasn't touched, but the instances and consoles
        # records were deleted.
fixed_ips = db_utils.get_table(self.engine, 'fixed_ips')
record = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNotNone(record)
consoles = db_utils.get_table(self.engine, 'consoles')
record = consoles.select(consoles.c.id == 1).execute().first()
self.assertIsNone(record)
instances = db_utils.get_table(self.engine, 'instances')
record = instances.select(instances.c.id == 1).execute().first()
self.assertIsNone(record)
@mock.patch.object(migration, 'db_version', return_value=2)
@mock.patch.object(migration, '_find_migrate_repo', return_value='repo')
@mock.patch.object(versioning_api, 'upgrade')
@mock.patch.object(versioning_api, 'downgrade')
@mock.patch.object(migration, 'get_engine', return_value='engine')
class TestDbSync(test.NoDBTestCase):
def test_version_none(self, mock_get_engine, mock_downgrade, mock_upgrade,
mock_find_repo, mock_version):
database = 'fake'
migration.db_sync(database=database)
mock_version.assert_called_once_with(database)
mock_find_repo.assert_called_once_with(database)
mock_get_engine.assert_called_once_with(database)
mock_upgrade.assert_called_once_with('engine', 'repo', None)
self.assertFalse(mock_downgrade.called)
def test_downgrade(self, mock_get_engine, mock_downgrade, mock_upgrade,
mock_find_repo, mock_version):
database = 'fake'
migration.db_sync(1, database=database)
mock_version.assert_called_once_with(database)
mock_find_repo.assert_called_once_with(database)
mock_get_engine.assert_called_once_with(database)
mock_downgrade.assert_called_once_with('engine', 'repo', 1)
self.assertFalse(mock_upgrade.called)
@mock.patch.object(migration, '_find_migrate_repo', return_value='repo')
@mock.patch.object(versioning_api, 'db_version')
@mock.patch.object(migration, 'get_engine')
class TestDbVersion(test.NoDBTestCase):
def test_db_version(self, mock_get_engine, mock_db_version,
mock_find_repo):
database = 'fake'
mock_get_engine.return_value = 'engine'
migration.db_version(database)
mock_find_repo.assert_called_once_with(database)
mock_db_version.assert_called_once_with('engine', 'repo')
def test_not_controlled(self, mock_get_engine, mock_db_version,
mock_find_repo):
database = 'api'
mock_get_engine.side_effect = ['engine', 'engine', 'engine']
exc = versioning_exceptions.DatabaseNotControlledError()
mock_db_version.side_effect = [exc, '']
metadata = mock.MagicMock()
metadata.tables.return_value = []
with mock.patch.object(sqlalchemy, 'MetaData',
metadata), mock.patch.object(migration,
'db_version_control') as mock_version_control:
migration.db_version(database)
mock_version_control.assert_called_once_with(0, database)
db_version_calls = [mock.call('engine', 'repo')] * 2
self.assertEqual(db_version_calls, mock_db_version.call_args_list)
engine_calls = [mock.call(database)] * 3
self.assertEqual(engine_calls, mock_get_engine.call_args_list)
@mock.patch.object(migration, '_find_migrate_repo', return_value='repo')
@mock.patch.object(migration, 'get_engine', return_value='engine')
@mock.patch.object(versioning_api, 'version_control')
class TestDbVersionControl(test.NoDBTestCase):
def test_version_control(self, mock_version_control, mock_get_engine,
mock_find_repo):
database = 'fake'
migration.db_version_control(database=database)
mock_find_repo.assert_called_once_with(database)
mock_version_control.assert_called_once_with('engine', 'repo', None)
class TestGetEngine(test.NoDBTestCase):
def test_get_main_engine(self):
with mock.patch.object(db_api, 'get_engine',
return_value='engine') as mock_get_engine:
engine = migration.get_engine()
self.assertEqual('engine', engine)
mock_get_engine.assert_called_once_with()
def test_get_api_engine(self):
with mock.patch.object(db_api, 'get_api_engine',
return_value='api_engine') as mock_get_engine:
engine = migration.get_engine('api')
self.assertEqual('api_engine', engine)
mock_get_engine.assert_called_once_with()
class TestFlavorCheck(test.TestCase):
def setUp(self):
super(TestFlavorCheck, self).setUp()
self.context = context.get_admin_context()
self.migration = importlib.import_module(
'nova.db.sqlalchemy.migrate_repo.versions.'
'291_enforce_flavors_migrated')
self.engine = db_api.get_engine()
def test_upgrade_clean(self):
inst = objects.Instance(context=self.context,
uuid=uuid.uuid4(),
user_id=self.context.user_id,
project_id=self.context.project_id,
system_metadata={'foo': 'bar'})
inst.create()
self.migration.upgrade(self.engine)
def test_upgrade_dirty(self):
inst = objects.Instance(context=self.context,
uuid=uuid.uuid4(),
user_id=self.context.user_id,
project_id=self.context.project_id,
system_metadata={'foo': 'bar',
'instance_type_id': 'foo'})
inst.create()
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_upgrade_flavor_deleted_instances(self):
inst = objects.Instance(context=self.context,
uuid=uuid.uuid4(),
user_id=self.context.user_id,
project_id=self.context.project_id,
system_metadata={'foo': 'bar',
'instance_type_id': 'foo'})
inst.create()
inst.destroy()
self.migration.upgrade(self.engine)
class TestNewtonCheck(test.TestCase):
def setUp(self):
super(TestNewtonCheck, self).setUp()
self.useFixture(nova_fixtures.DatabaseAtVersion(329))
self.context = context.get_admin_context()
self.migration = importlib.import_module(
'nova.db.sqlalchemy.migrate_repo.versions.'
'330_enforce_mitaka_online_migrations')
self.engine = db_api.get_engine()
def test_all_migrated(self):
cn = objects.ComputeNode(context=self.context,
vcpus=1, memory_mb=512, local_gb=10,
vcpus_used=0, memory_mb_used=256,
local_gb_used=5, hypervisor_type='HyperDanVM',
hypervisor_version='34', cpu_info='foo')
cn.create()
objects.Aggregate(context=self.context,
name='foo').create()
objects.PciDevice.create(self.context, {})
self.migration.upgrade(self.engine)
def test_cn_not_migrated(self):
cn = objects.ComputeNode(context=self.context,
vcpus=1, memory_mb=512, local_gb=10,
vcpus_used=0, memory_mb_used=256,
local_gb_used=5, hypervisor_type='HyperDanVM',
hypervisor_version='34', cpu_info='foo')
cn.create()
db_api.compute_node_update(self.context, cn.id, {'uuid': None})
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_aggregate_not_migrated(self):
agg = db_api.aggregate_create(self.context, {"name": "foobar"})
db_api.aggregate_update(self.context, agg.id, {'uuid': None})
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_pci_device_not_migrated(self):
db_api.pci_device_update(self.context, 1, 'foo:bar',
{'parent_addr': None,
'compute_node_id': 1,
'address': 'foo:bar',
'vendor_id': '123',
'product_id': '456',
'dev_type': 'foo',
'label': 'foobar',
'status': 'whatisthis?'})
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
| 44.843416
| 79
| 0.637965
|
0d32edff94c3ac887bd837bfca0e6d5a4ccb5596
| 83
|
py
|
Python
|
Know Your Code/Python/area_of_circle/area of circle.py
|
rswalia/open-source-contribution-for-beginners
|
1ea29479c6d949760c83926b4c43a6b0d33ad0a0
|
[
"MIT"
] | 35
|
2021-12-20T13:37:01.000Z
|
2022-03-22T20:52:36.000Z
|
Know Your Code/Python/area_of_circle/area of circle.py
|
rswalia/open-source-contribution-for-beginners
|
1ea29479c6d949760c83926b4c43a6b0d33ad0a0
|
[
"MIT"
] | 152
|
2021-11-01T06:00:11.000Z
|
2022-03-20T11:40:00.000Z
|
Know Your Code/Python/area_of_circle/area of circle.py
|
rswalia/open-source-contribution-for-beginners
|
1ea29479c6d949760c83926b4c43a6b0d33ad0a0
|
[
"MIT"
] | 71
|
2021-11-01T06:02:37.000Z
|
2022-03-20T04:49:30.000Z
|
r=float(input("Enter radius of circle:"))
a=3.14*(r**2)
print("area of circle:",a)
| 20.75
| 41
| 0.650602
|
e48a287119e11879dfba04e7e75eef724491ce5e
| 2,532
|
py
|
Python
|
c_src/deps/boringssl/util/bot/update_clang.py
|
onedata/erlang-tls
|
1c0d2c3eb8534540a5effb7f040923a0765eccf0
|
[
"BSL-1.0"
] | 15
|
2016-10-16T06:14:21.000Z
|
2021-12-22T18:10:17.000Z
|
third_party/boringssl/src/util/bot/update_clang.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/boringssl/src/util/bot/update_clang.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | 7
|
2018-01-08T02:53:32.000Z
|
2020-10-15T13:01:46.000Z
|
# Copyright (c) 2015, Google Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os.path
import shutil
import sys
import tarfile
import tempfile
import urllib
# CLANG_REVISION and CLANG_SUB_REVISION determine the build of clang
# to use. These should be synced with tools/clang/scripts/update.py in
# Chromium.
CLANG_REVISION = "267383"
CLANG_SUB_REVISION = "1"
PACKAGE_VERSION = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION)
LLVM_BUILD_DIR = os.path.join(os.path.dirname(__file__), "llvm-build")
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, "cr_build_revision")
CDS_URL = "https://commondatastorage.googleapis.com/chromium-browser-clang"
def DownloadFile(url, path):
"""DownloadFile fetches |url| to |path|."""
last_progress = [0]
def report(a, b, c):
progress = int(a * b * 100.0 / c)
if progress != last_progress[0]:
print >> sys.stderr, "Downloading... %d%%" % progress
last_progress[0] = progress
urllib.urlretrieve(url, path, reporthook=report)
def main(args):
# For now, only download clang on Linux.
if not sys.platform.startswith("linux"):
return 0
if os.path.exists(STAMP_FILE):
with open(STAMP_FILE) as f:
if f.read().strip() == PACKAGE_VERSION:
print >> sys.stderr, "Clang already at %s" % (PACKAGE_VERSION,)
return 0
if os.path.exists(LLVM_BUILD_DIR):
shutil.rmtree(LLVM_BUILD_DIR)
print >> sys.stderr, "Downloading Clang %s" % (PACKAGE_VERSION,)
cds_full_url = "%s/Linux_x64/clang-%s.tgz" % (CDS_URL, PACKAGE_VERSION)
with tempfile.NamedTemporaryFile() as temp:
DownloadFile(cds_full_url, temp.name)
with tarfile.open(temp.name, "r:gz") as tar_file:
tar_file.extractall(LLVM_BUILD_DIR)
with open(STAMP_FILE, "wb") as stamp_file:
stamp_file.write(PACKAGE_VERSION)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 35.166667
| 78
| 0.731043
|
e2173b2f09759229919531b14a883e56e6ab0891
| 687
|
py
|
Python
|
homework/tree-traversal/utils.py
|
OscarFlores-IFi/credit-rik-lecture
|
cc522672ae26ff19cf857987c4a14fb5f8409f15
|
[
"MIT"
] | null | null | null |
homework/tree-traversal/utils.py
|
OscarFlores-IFi/credit-rik-lecture
|
cc522672ae26ff19cf857987c4a14fb5f8409f15
|
[
"MIT"
] | null | null | null |
homework/tree-traversal/utils.py
|
OscarFlores-IFi/credit-rik-lecture
|
cc522672ae26ff19cf857987c4a14fb5f8409f15
|
[
"MIT"
] | null | null | null |
import functools
import json
def pretty_print(logger, serializer_function=lambda obj: obj.__dict__):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
serializable_obj = func(*args, **kwargs)
try:
formatted_output = json.dumps(serializable_obj, indent=4, default=serializer_function)
print(formatted_output)
except TypeError as e:
logger.error(f"Type Error encounter with message {e}")
raise # Re-throw exception to fail the program execution with stack-trace.
return wrapper
return decorator
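# A minimal usage sketch (hypothetical logger and node class, illustration only):
#
#   import logging
#   logger = logging.getLogger(__name__)
#
#   @pretty_print(logger)
#   def build_tree():
#       return SomeNode(value=1, children=[])   # any object exposing __dict__
#
# The decorated function's return value is dumped as indented JSON via the given
# serializer; a TypeError is logged and re-raised if serialization fails.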
def sum_levels(parent):
pass
| 31.227273
| 102
| 0.637555
|
356c576d450b7bb780917f800414964ca365b2cc
| 1,211
|
py
|
Python
|
app1/meeting_app/report/students_enrolled_in_cursee_report/students_enrolled_in_cursee_report.py
|
jjaycaneza/school_test
|
b385bf70e98a9ba237c3b8bb53eba03b5ad1a0e1
|
[
"MIT"
] | null | null | null |
app1/meeting_app/report/students_enrolled_in_cursee_report/students_enrolled_in_cursee_report.py
|
jjaycaneza/school_test
|
b385bf70e98a9ba237c3b8bb53eba03b5ad1a0e1
|
[
"MIT"
] | null | null | null |
app1/meeting_app/report/students_enrolled_in_cursee_report/students_enrolled_in_cursee_report.py
|
jjaycaneza/school_test
|
b385bf70e98a9ba237c3b8bb53eba03b5ad1a0e1
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013, justine and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_courses_information(course):
print(course)
if(course == None):
coursesData = frappe.db.sql("""select * from `tabStudent Information` """,as_dict=True)
else:
coursesData = frappe.db.sql("""select * from `tabStudent Information` where `tabStudent Information`.`course` = %s""", (course),as_dict=True)
print(coursesData)
jsonData = []
for i in range(len(coursesData)):
print(coursesData[i])
jsonData.append({
'studentid': coursesData[i].name,
'fullname': coursesData[i].fullname,
'course': coursesData[i].course
})
return jsonData
def execute(filters=None):
coursesData = get_courses_information(course=filters.course)
columns = [
{
'fieldname': 'studentid',
'label': ('Student ID'),
'fieldtype': 'Data',
'options': ''
},
{
'fieldname': 'fullname',
'label': ('Full Name'),
'fieldtype': 'Data',
'options': ''
},
{
'fieldname': 'course',
'label': ('Course'),
'fieldtype': 'Link',
'options': 'Courses'
}
]
data = coursesData
return columns, data
| 22.018182
| 146
| 0.663914
|
f9c33f750f76e1591794eb7c14591434a66454a2
| 1,677
|
py
|
Python
|
Aula18/app.py
|
icarogoggin/BlueEdtech_Modulo2
|
ca3552624e51d15181927dc882c30572093a1b7d
|
[
"MIT"
] | null | null | null |
Aula18/app.py
|
icarogoggin/BlueEdtech_Modulo2
|
ca3552624e51d15181927dc882c30572093a1b7d
|
[
"MIT"
] | null | null | null |
Aula18/app.py
|
icarogoggin/BlueEdtech_Modulo2
|
ca3552624e51d15181927dc882c30572093a1b7d
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
user = ''
password = ''
host = ''
database = ''
app.config['SQLALCHEMY_DATABASE_URI'] = f'postgresql://{user}:{password}@{host}/{database}'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = ""
db = SQLAlchemy(app)
class Filmes(db.Model):
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.String(255), nullable=False)
imagem_url = db.Column(db.String(255), nullable=False)
def __init__(self, nome, imagem_url):
self.nome = nome
self.imagem_url = imagem_url
@staticmethod
def read_all():
return Filmes.query.order_by(Filmes.id.asc()).all()
@staticmethod
def read_single(id_registro):
return Filmes.query.get(id_registro)
def save(self):
db.session.add(self)
db.session.commit()
@app.route("/")
def index():
return render_template("index.html")
@app.route("/read")
def read_all():
registros = Filmes.read_all()
return render_template("read_all.html", registros=registros)
@app.route("/read/<id_registro>")
def read_id(id_registro):
registro = Filmes.read_single(id_registro)
return render_template("read_single.html", registro=registro)
@app.route("/create", methods=('GET', 'POST'))
def create():
novo_id = None
if request.method == 'POST':
form = request.form
registro = Filmes(form['nome'], form['imagem_url'])
registro.save()
novo_id = registro.id
return render_template("create.html", novo_id=novo_id)
if (__name__ == "__main__"):
app.run(debug=True)
| 23.619718
| 91
| 0.670244
|
22fcc123f3515b414b856144e9c91b8d020e0fc7
| 6,205
|
py
|
Python
|
mmcls/datasets/dataset_wrappers.py
|
xhdtc8/mmclassification
|
606015470be69aaa3ea19b1a1bfed598c2ec5785
|
[
"Apache-2.0"
] | null | null | null |
mmcls/datasets/dataset_wrappers.py
|
xhdtc8/mmclassification
|
606015470be69aaa3ea19b1a1bfed598c2ec5785
|
[
"Apache-2.0"
] | null | null | null |
mmcls/datasets/dataset_wrappers.py
|
xhdtc8/mmclassification
|
606015470be69aaa3ea19b1a1bfed598c2ec5785
|
[
"Apache-2.0"
] | null | null | null |
import bisect
import math
from collections import defaultdict
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
add `get_cat_ids` function.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
"""
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
def get_cat_ids(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_cat_ids(sample_idx)
@DATASETS.register_module()
class RepeatDataset(object):
"""A wrapper of repeated dataset.
    The length of the repeated dataset will be `times` times that of the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx % self._ori_len]
def get_cat_ids(self, idx):
return self.dataset.get_cat_ids(idx % self._ori_len)
def __len__(self):
return self.times * self._ori_len
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
@DATASETS.register_module()
class ClassBalancedDataset(object):
"""A wrapper of repeated dataset with repeat factor.
Suitable for training on class imbalanced datasets like LVIS. Following
the sampling strategy in [1], in each epoch, an image may appear multiple
times based on its "repeat factor".
The repeat factor for an image is a function of the frequency the rarest
category labeled in that image. The "frequency of category c" in [0, 1]
is defined by the fraction of images in the training set (without repeats)
in which category c appears.
    The dataset needs to implement :func:`self.get_cat_ids(idx)` to support
ClassBalancedDataset.
    The repeat factor is computed as follows.
1. For each category c, compute the fraction # of images
that contain it: f(c)
2. For each category c, compute the category-level repeat factor:
r(c) = max(1, sqrt(t/f(c)))
3. For each image I and its labels L(I), compute the image-level repeat
factor:
r(I) = max_{c in L(I)} r(c)
References:
.. [1] https://arxiv.org/pdf/1908.03195.pdf
Args:
dataset (:obj:`CustomDataset`): The dataset to be repeated.
oversample_thr (float): frequency threshold below which data is
repeated. For categories with `f_c` >= `oversample_thr`, there is
no oversampling. For categories with `f_c` < `oversample_thr`, the
            degree of oversampling follows the square-root inverse frequency
heuristic above.
"""
def __init__(self, dataset, oversample_thr):
self.dataset = dataset
self.oversample_thr = oversample_thr
self.CLASSES = dataset.CLASSES
repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
repeat_indices = []
for dataset_index, repeat_factor in enumerate(repeat_factors):
repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
self.repeat_indices = repeat_indices
flags = []
if hasattr(self.dataset, 'flag'):
for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
flags.extend([flag] * int(math.ceil(repeat_factor)))
assert len(flags) == len(repeat_indices)
self.flag = np.asarray(flags, dtype=np.uint8)
def _get_repeat_factors(self, dataset, repeat_thr):
# 1. For each category c, compute the fraction # of images
# that contain it: f(c)
category_freq = defaultdict(int)
num_images = len(dataset)
for idx in range(num_images):
# print('-'*60)
# print('a=',[self.dataset.get_cat_ids(idx).tolist()])
# cat_ids = set(self.dataset.get_cat_ids(idx))
cat_ids = set([self.dataset.get_cat_ids(idx).tolist()])
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
            assert v > 0, f'category {k} does not contain any images'
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t/f(c)))
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I and its labels L(I), compute the image-level
# repeat factor:
# r(I) = max_{c in L(I)} r(c)
repeat_factors = []
for idx in range(num_images):
# cat_ids = set(self.dataset.get_cat_ids(idx))
cat_ids = set([self.dataset.get_cat_ids(idx).tolist()])
repeat_factor = max(
{category_repeat[cat_id]
for cat_id in cat_ids})
repeat_factors.append(repeat_factor)
return repeat_factors
def __getitem__(self, idx):
ori_index = self.repeat_indices[idx]
return self.dataset[ori_index]
def __len__(self):
return len(self.repeat_indices)
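# --- Editor's addition: a hedged, self-contained sketch of the repeat-factor math above ---
# Pure-Python illustration of steps 1-3 from the ClassBalancedDataset docstring on a toy
# list of per-image category sets; `toy_cat_ids` and `repeat_thr` are made-up values and
# nothing here is part of the mmcls API.
def _toy_repeat_factors(toy_cat_ids=({0}, {0}, {0, 1}, {1, 2}), repeat_thr=0.5):
    num_images = len(toy_cat_ids)
    # 1. f(c): fraction of images containing each category
    category_freq = defaultdict(int)
    for cat_ids in toy_cat_ids:
        for cat_id in cat_ids:
            category_freq[cat_id] += 1
    category_freq = {c: n / num_images for c, n in category_freq.items()}
    # 2. r(c) = max(1, sqrt(t / f(c)))
    category_repeat = {c: max(1.0, math.sqrt(repeat_thr / f)) for c, f in category_freq.items()}
    # 3. r(I) = max_{c in L(I)} r(c)
    return [max(category_repeat[c] for c in cat_ids) for cat_ids in toy_cat_ids]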
| 37.155689
| 167
| 0.646737
|
1a790ae7aeca8e872c40bb87c94be5c10c5c16a9
| 5,046
|
py
|
Python
|
pyaz/network/private_link_service/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/private_link_service/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/private_link_service/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from ... pyaz_utils import _call_az
from . import connection
def create(lb_frontend_ip_configs, name, resource_group, subnet, auto_approval=None, edge_zone=None, enable_proxy_protocol=None, fqdns=None, lb_name=None, location=None, private_ip_address=None, private_ip_address_version=None, private_ip_allocation_method=None, public_ip_address=None, tags=None, visibility=None, vnet_name=None):
'''
Create a private link service.
Required Parameters:
- lb_frontend_ip_configs -- Space-separated list of names or IDs of load balancer frontend IP configurations to link to. If names are used, also supply `--lb-name`.
- name -- Name of the private link service.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- subnet -- Name or ID of subnet to use. If name provided, also supply `--vnet-name`.
Optional Parameters:
- auto_approval -- Space-separated list of subscription IDs to auto-approve.
- edge_zone -- The name of edge zone.
- enable_proxy_protocol -- Enable proxy protocol for private link service.
- fqdns -- Space-separated list of FQDNs.
- lb_name -- Name of the load balancer to retrieve frontend IP configs from. Ignored if a frontend IP configuration ID is supplied.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- private_ip_address -- Static private IP address to use.
- private_ip_address_version -- IP version of the private IP address.
- private_ip_allocation_method -- Private IP address allocation method
    - public_ip_address -- Name or ID of the public IP address to use.
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
- visibility -- Space-separated list of subscription IDs for which the private link service is visible.
- vnet_name -- The virtual network (VNet) name.
'''
return _call_az("az network private-link-service create", locals())
def delete(name, resource_group):
'''
Delete a private link service.
Required Parameters:
- name -- Name of the private link service.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network private-link-service delete", locals())
def list(resource_group=None):
'''
List private link services.
Optional Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network private-link-service list", locals())
def show(name, resource_group, expand=None):
'''
Get the details of a private link service.
Required Parameters:
- name -- Name of the private link service.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- expand -- Expands referenced resources.
'''
return _call_az("az network private-link-service show", locals())
def update(name, resource_group, add=None, auto_approval=None, enable_proxy_protocol=None, force_string=None, fqdns=None, lb_frontend_ip_configs=None, lb_name=None, remove=None, set=None, tags=None, visibility=None):
'''
Update a private link service.
Required Parameters:
- name -- Name of the private link service.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
- auto_approval -- Space-separated list of subscription IDs to auto-approve.
- enable_proxy_protocol -- Enable proxy protocol for private link service.
- force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
- fqdns -- Space-separated list of FQDNs.
- lb_frontend_ip_configs -- Space-separated list of names or IDs of load balancer frontend IP configurations to link to. If names are used, also supply `--lb-name`.
- lb_name -- Name of the load balancer to retrieve frontend IP configs from. Ignored if a frontend IP configuration ID is supplied.
- remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
- set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
- visibility -- Space-separated list of subscription IDs for which the private link service is visible.
'''
return _call_az("az network private-link-service update", locals())
| 55.450549
| 331
| 0.726912
|
48df543945ccfb039201443f8de5fd07a6556c30
| 9,385
|
py
|
Python
|
build_tables.py
|
sunaga-lab/busmapper
|
5e22d9d521c6b52f43494c8c6c1990fe1443382f
|
[
"MIT"
] | null | null | null |
build_tables.py
|
sunaga-lab/busmapper
|
5e22d9d521c6b52f43494c8c6c1990fe1443382f
|
[
"MIT"
] | 1
|
2018-01-16T03:33:43.000Z
|
2018-01-16T03:33:43.000Z
|
build_tables.py
|
sunaga-lab/busmapper
|
5e22d9d521c6b52f43494c8c6c1990fe1443382f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from busmap import pdfutil, htmlutil, tablereader, preset_data_reader
from busmap.build_script import *
import shutil
# Build settings
# Path to PDFMiner's pdf2txt.py
pdfutil.PDF2TXT_PATH = shutil.which('pdf2txt.py')
# Debugging of PDF layout, etc.
pdfutil.pdfutil_debug_enabled = False
# Debugging of tablereader
tablereader.tablereader_debug_enabled = False
debug_pages = []
@build_group("木更津-品川")
def parse_kisarazu_shinagawa():
@build_for('stables')
def build():
pages = load_pdfpages_from_url('http://www.nitto-kotsu.co.jp/img/kosoku/shinagawa.pdf')
page = pages[0]
debug_pages.append(page)
dojyujitsu_text = page.search_text_contains("土休日").first()
weekday_area = page.clipped(page.bounds.left_area_of(dojyujitsu_text.rect), inclusive=False, clip_name='平日')
kuradi = weekday_area.search_text_contains("下り").first()
@build_for('T1.json')
def build():
        # Parse the weekday up-direction (上り) table
clipped_page = weekday_area.clipped(weekday_area.bounds.left_area_of(kuradi.rect), inclusive=False, clip_name='上り')
tr = pdfutil.TableRecognizer(clipped_page)
tr.decl_column(clipped_page.search_text_contains("平日").first().dmark())
tr.decl_column(clipped_page.search_text_contains("木更津東口発").first().dmark())
tr.decl_column(clipped_page.search_text_contains("長浦駅北口発").first().dmark())
tr.decl_column(clipped_page.search_text_contains("袖ケ浦駅発").first().dmark())
tr.decl_column(clipped_page.search_text_contains("袖ケ浦BT発").first().dmark())
tr.decl_column(clipped_page.search_text_contains("金田BT発").first().dmark())
tr.decl_column(clipped_page.search_text_contains("品川駅東口着").first().dmark())
tr.decl_sequential_rows(clipped_page.search_text_contains("上り").first(), range(1, 500))
return tr.gen_with_table_attrs(tablename='T上り', linename='高速バス木更津・品川線', day_options=['weekday'])
@build_for('T2.json')
def build():
        # Parse the weekday down-direction (下り) table
clipped_page = weekday_area.clipped(weekday_area.bounds.right_area_of(kuradi.rect.left), inclusive=True, clip_name='下り')
tr = pdfutil.TableRecognizer(clipped_page)
tr.decl_column(clipped_page.search_text_contains("平日").first().dmark())
tr.decl_column(clipped_page.search_text_contains("木更津東口着").first().dmark())
tr.decl_column(clipped_page.search_text_contains("長浦駅北口着").first().dmark())
tr.decl_column(clipped_page.search_text_contains("袖ケ浦駅着").first().dmark())
tr.decl_column(clipped_page.search_text_contains("袖ケ浦BT着").first().dmark())
tr.decl_column(clipped_page.search_text_contains("金田BT着").first().dmark())
tr.decl_column(clipped_page.search_text_contains("品川駅東口発").first().dmark())
tr.decl_sequential_rows(clipped_page.search_text_contains("下り").first(), range(1, 500))
return tr.gen_with_table_attrs(tablename='T下り', linename='高速バス木更津・品川線', day_options=['weekday'])
import_tables('T1.json', 'T2.json')
@build_group("木更津-羽田")
def parse_kisarazu_haneda():
@build_for('tables')
def build():
pages = load_pdfpages_from_url('http://www.nitto-kotsu.co.jp/img/kosoku/kosoku-kisarazu-haneda.pdf')
page = pages[0]
debug_pages.append(page)
kuradi = page.search_text_contains("【下り】").first()
nobori = page.search_text_contains("【上り】").first()
noriba_goannai = page.search_text_contains("【のりばご案内】").first()
@build_for('T1.json')
def build():
        # Parse the weekday up-direction (上り) table
clipped_page = page.clipped(
page.bounds.left_area_of(kuradi.rect).bottom_area_of(nobori.rect).top_area_of(noriba_goannai.rect),
inclusive=False, clip_name='上り')
r = pdfutil.TableRecognizer(clipped_page)
r.decl_column(clipped_page.search_text_contains("便").first().dmark())
r.decl_column(clipped_page.search_text_contains("会社").first().dmark())
r.decl_column(clipped_page.search_text_contains("木更津駅").first().dmark())
r.decl_column(clipped_page.search_text_contains("袖ヶ浦BT").first().dmark())
r.decl_column(clipped_page.search_text_contains("金田BT").first().dmark())
r.decl_column(clipped_page.search_text_contains("第1ターミナル").first().dmark())
r.decl_column(clipped_page.search_text_contains("第2ターミナル").first().dmark())
r.decl_column(clipped_page.search_text_contains("国際ターミナル").first().dmark())
r.decl_sequential_rows(clipped_page.search_text_contains("便").first(), range(1, 500))
return r.gen_with_table_attrs(tablename='T上り', linename='高速バス木更津・羽田空港線', day_options=['weekday'])
@build_for('T2.json')
def build():
        # Parse the weekday down-direction (下り) table
clipped_page = page.clipped(page.bounds.right_area_of(kuradi.rect.left).bottom_area_of(nobori.rect).top_area_of(noriba_goannai.rect), inclusive=False,
clip_name='下り')
r = pdfutil.TableRecognizer(clipped_page)
r.decl_column(clipped_page.search_text_contains("便").first().dmark())
r.decl_column(clipped_page.search_text_contains("会社").first().dmark())
r.decl_column(clipped_page.search_text_contains("木更津駅").first().dmark())
r.decl_column(clipped_page.search_text_contains("袖ヶ浦BT").first().dmark())
r.decl_column(clipped_page.search_text_contains("金田BT").first().dmark())
r.decl_column(clipped_page.search_text_contains("第1ターミナル").first().dmark())
r.decl_column(clipped_page.search_text_contains("第2ターミナル").first().dmark())
r.decl_column(clipped_page.search_text_contains("国際ターミナル").first().dmark())
r.decl_sequential_rows(clipped_page.search_text_contains("便").first(), range(1, 500))
return r.gen_with_table_attrs(tablename='T下り', linename='高速バス木更津・羽田空港線', day_options=['weekday'])
import_tables(
'T1.json',
'T2.json'
)
@build_group("木更津-川崎")
def parse_kisarazu_kawasaki():
@build_for('tables')
def build():
reader = htmlutil.HTMLReader("http://www.keikyu-bus.co.jp/highway/k-sodegaura/")
@build_for('T1.json')
def build():
tbls = reader.make_table_readers("//*[text()='袖ヶ浦バスターミナル・木更津駅ゆき']/following::table")
table = tbls[0].concat_vert_with(tbls[1])
return table.with_table_atts(tablename="T下り", linename='高速バス木更津・川崎線', day_options=['weekday'])
@build_for('T2.json')
def build():
tbls = reader.make_table_readers("//*[text()='川崎駅ゆき']/following::table")
table = tbls[0].concat_vert_with(tbls[1])
return table.with_table_atts(tablename="T上り", linename='高速バス木更津・川崎線', day_options=['weekday'])
import_tables(
'T1.json',
'T2.json'
)
@build_group("木更津-新宿")
def parse_kisarazu_shinjuku():
@build_for('tables.json')
def build():
reader = htmlutil.HTMLReader("http://www.odakyubus.co.jp/highway/line/aqualine.html")
tables = reader.make_table_readers("//*[text()='平日(月~金)']/following::table")
tables[0].with_table_atts(
tablename="T下り",
linename='高速バス木更津・新宿線',
day_options=['weekday']
)
tables[1].with_table_atts(
tablename="T上り",
linename='高速バス木更津・新宿線',
day_options=['weekday']
)
return tables
import_tables(
'tables.json'
)
@build_group("木更津-東京")
def parse_kisarazu_tokyo():
@build_for('tables')
def build():
reader = htmlutil.HTMLReader("http://www.keiseibus.co.jp/kousoku/timetable.php?id=38")
@build_for('for-tokyo-tables.json')
def build():
tables = reader.make_table_readers(
"//*[text()='東雲車庫・東京駅行き']/following::dd[1]//table",
head_column_index=1
)
for i, table in enumerate(tables):
table.with_table_atts(
tablename="T上り" + str(i + 1),
linename='高速バス木更津・東京線',
day_options=['weekday'],
invert_axis=True
)
return tables
@build_for('for-kisarazu-tables.json')
def build():
tables = reader.make_table_readers(
"//*[text()='木更津駅・君津行き']/following::dd[1]//table",
head_column_index=1
)
for i, table in enumerate(tables):
table.with_table_atts(
tablename="T下り" + str(i + 1),
linename='高速バス木更津・東京線',
day_options=['weekday'],
invert_axis=True
)
return tables
import_tables(
'for-tokyo-tables.json',
'for-kisarazu-tables.json'
)
def main():
try:
build_all()
db.dump('tmp/dbdump.txt', format='json', for_debug=True)
db.dump('www/db.js', format='jsonp', field_name='DB')
db.dump('www/db.json', format='json')
db.dump('tmp/db_debug.txt', format='debug_text')
finally:
print("Building debug pages...")
for p in debug_pages:
p.flush_debug_marks()
if __name__ == '__main__':
main()
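# --- Editor's addition: a hedged sketch of the recognizer pattern used by the groups above ---
# Condenses the decl_column / decl_sequential_rows / gen_with_table_attrs sequence into one
# uncalled helper. The URL, header labels and table/line names are placeholders, and this
# function is intentionally not registered with any @build_group.
def _example_table(url='http://example.com/timetable.pdf'):
    pages = load_pdfpages_from_url(url)
    page = pages[0]
    r = pdfutil.TableRecognizer(page)
    # one decl_column call per column header found on the page
    r.decl_column(page.search_text_contains("Column A").first().dmark())
    r.decl_column(page.search_text_contains("Column B").first().dmark())
    # rows are numbered sequentially below the given anchor text
    r.decl_sequential_rows(page.search_text_contains("Row label").first(), range(1, 500))
    return r.gen_with_table_attrs(tablename='TExample', linename='Example line', day_options=['weekday'])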
| 39.93617
| 162
| 0.627064
|
56144cad38468463dd98bd6f792229fbd4be88ff
| 2,715
|
py
|
Python
|
chia_tea/cli/start.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 6
|
2021-08-05T21:31:15.000Z
|
2021-11-15T20:54:25.000Z
|
chia_tea/cli/start.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 49
|
2021-08-05T19:33:08.000Z
|
2022-03-30T19:33:38.000Z
|
chia_tea/cli/start.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 1
|
2022-01-09T17:08:32.000Z
|
2022-01-09T17:08:32.000Z
|
import typer
from ..monitoring.run_server import run_monitoring_server
from ..utils.config import DEFAULT_CONFIG_FILEPATH, read_config
from ..monitoring.run_client import run_monitoring_client
from ..discord.bot import run_discord_bot
from ..copy.main import run_copy
start_cmd = typer.Typer(
no_args_is_help=True,
help="Start chia-tea tools and processes.",
)
@start_cmd.command(name="copy")
def copy_cmd(config: str = DEFAULT_CONFIG_FILEPATH) -> None:
"""Copy files from one place to another.
For source and target directories please see the config file.
You can get the standard config-file filepath by running
`chia-tea config location`.
"""
try:
config = read_config(filepath=config)
run_copy(config=config)
except KeyboardInterrupt:
# just stopping it, that is ok
pass
except Exception as err:
typer.echo(f"⛈️ Error: {err}")
raise typer.Exit(1)
finally:
typer.echo("Stopping copy")
@start_cmd.command("monitoring-client")
def monitoring_client_cmd(config: str = DEFAULT_CONFIG_FILEPATH):
"""Starts the monitoring client observing chia and the machine"""
exit_code = 0
try:
config = read_config(filepath=config)
run_monitoring_client(config)
except KeyboardInterrupt:
# just stopping it, that is ok
pass
except Exception as err:
typer.echo(f"⛈️ Error: {err}")
exit_code = 1
finally:
typer.echo("Shutting down monitoring client.")
raise typer.Exit(exit_code)
@start_cmd.command("monitoring-server")
def monitoring_server_cmd(config: str = DEFAULT_CONFIG_FILEPATH):
"""Starts the server receiving and storing monitoring data"""
exit_code = 0
try:
config = read_config(filepath=config)
run_monitoring_server(config)
except KeyboardInterrupt:
# just stopping it, that is ok
pass
except Exception as err:
typer.echo(f"⛈️ Error: {err}")
exit_code = 1
finally:
typer.echo("Shutting down monitoring server.")
raise typer.Exit(exit_code)
@start_cmd.command("discord-bot")
def discord_bot_cmd(config: str = DEFAULT_CONFIG_FILEPATH):
"""
Start the discord bot watching the monitoring database.
"""
exit_code = 0
try:
config = read_config(filepath=config)
run_discord_bot(config.discord.token, config.discord.channel_id)
except KeyboardInterrupt:
# just stopping it, that is ok
pass
except Exception as err:
typer.echo(f"⛈️ Error: {err}")
exit_code = 1
finally:
typer.echo("Shutting down discord-bot.")
raise typer.Exit(exit_code)
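# --- Editor's addition: a hedged sketch of mounting these commands in a root CLI ---
# `start_cmd` above is a Typer sub-application; a root application would typically attach
# it with add_typer. The root app and the example invocation in the comment below are
# assumptions, not taken from this repository.
def _build_root_cli() -> typer.Typer:
    root = typer.Typer(no_args_is_help=True)
    # exposes e.g. `<cli> start monitoring-server --config my_config.yml`
    root.add_typer(start_cmd, name="start")
    return root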
| 28.882979
| 72
| 0.674033
|
8d24dc5a5920abe50ec2dcf97539d298e1f517c1
| 1,678
|
py
|
Python
|
action-transformer/nets/proposal_layer.py
|
leaderj1001/Action-Localization
|
04d972e6dc3c07d347c70893723d91487c1c8cbd
|
[
"MIT"
] | 24
|
2019-07-10T15:13:27.000Z
|
2021-07-08T12:12:40.000Z
|
action-baseline/nets/proposal_layer.py
|
leaderj1001/Action-Localization
|
04d972e6dc3c07d347c70893723d91487c1c8cbd
|
[
"MIT"
] | 7
|
2019-10-06T12:22:04.000Z
|
2020-04-15T13:14:10.000Z
|
action-transformer/nets/proposal_layer.py
|
leaderj1001/Action-Localization
|
04d972e6dc3c07d347c70893723d91487c1c8cbd
|
[
"MIT"
] | 4
|
2019-10-31T09:01:15.000Z
|
2021-03-26T04:20:21.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from utils.config import cfg
from nets.bbox_transform import bbox_transform_inv, clip_boxes
from torchvision.ops import nms
import torch
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
"""
    A simplified version compared to fast/er RCNN. For details, please see the technical report.
"""
if type(cfg_key) == bytes:
cfg_key = cfg_key.decode('utf-8')
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
# Get the scores and bounding boxes
scores = rpn_cls_prob[:, :, :, num_anchors:]
rpn_bbox_pred = rpn_bbox_pred.view((-1, 4))
scores = scores.contiguous().view(-1, 1)
proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
proposals = clip_boxes(proposals, im_info[:2])
# Pick the top region proposals
scores, order = scores.view(-1).sort(descending=True)
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
scores = scores[:pre_nms_topN].view(-1, 1)
proposals = proposals[order.data, :]
# Non-maximal suppression
keep = nms(proposals, scores.squeeze(1), nms_thresh)
    # Pick the top region proposals after NMS
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep, ]
# Only support single image as input
batch_inds = proposals.new_zeros(proposals.size(0), 1)
blob = torch.cat((batch_inds, proposals), 1)
return blob, scores
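# --- Editor's addition: a hedged, stand-alone look at the torchvision NMS call used above ---
# Toy boxes in (x1, y1, x2, y2) format; nms returns the indices of kept boxes sorted by
# decreasing score. Values below are made up for illustration.
def _nms_demo():
    boxes = torch.tensor([[0., 0., 10., 10.],
                          [1., 1., 11., 11.],     # heavily overlaps the first box
                          [50., 50., 60., 60.]])  # disjoint from both
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep = nms(boxes, scores, 0.5)  # -> tensor([0, 2]); the overlapping box is suppressed
    return keep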
| 32.901961
| 102
| 0.711561
|
c64e249883ef9b798a20cc1d11a96d0c8e6be879
| 12,537
|
py
|
Python
|
core/movieinfo.py
|
devqore/Watcher3
|
f7f22d077bdcc34bdee7bc30691bef6f59b4bed4
|
[
"Apache-2.0"
] | null | null | null |
core/movieinfo.py
|
devqore/Watcher3
|
f7f22d077bdcc34bdee7bc30691bef6f59b4bed4
|
[
"Apache-2.0"
] | null | null | null |
core/movieinfo.py
|
devqore/Watcher3
|
f7f22d077bdcc34bdee7bc30691bef6f59b4bed4
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
from time import time, sleep
import core
import os
from core.helpers import Comparisons, Url
_k = Comparisons._k
logging = logging.getLogger(__name__)
class TheMovieDatabase(object):
tokens = 30 # int initial amount of tokens for TMDB rate limiting
last_token_fill = time() # float epoch time of last token fill
token_capacity = 30 # int max number of tokens. TMDB allows more, but artificially restricting the hit rate doesn't hurt
@staticmethod
def _get_tokens():
''' Refills TMDB tokens if possible
If tokens are needed, checks if they've been refilled in the
last 10 seconds and tops off the capacity.
Returns int # of tmdb tokens available
'''
if TheMovieDatabase.tokens < TheMovieDatabase.token_capacity:
now = time()
if (now - TheMovieDatabase.last_token_fill) > 10:
TheMovieDatabase.tokens = TheMovieDatabase.token_capacity
TheMovieDatabase.last_token_fill = time()
return TheMovieDatabase.tokens
@staticmethod
def _use_token():
''' Uses tmdb api token
Used as a blocking method before url requests.
        If remaining tokens are fewer than 3, waits for refill.
Does not return
'''
while TheMovieDatabase._get_tokens() < 3:
sleep(0.3)
TheMovieDatabase.tokens -= 1
@staticmethod
def search(search_term, single=False):
''' Search TMDB for all matches
search_term (str): title of movie to search for
single (bool): return only first result <optional - default False>
        Accepts an imdbid, a 'tmdb:<id>' term, or a title (optionally with year) and
        dispatches to _search_imdbid, _search_tmdbid, or _search_title depending on the
        data received.
        Returns list of dicts of individual movies from the dispatched search function.
'''
logging.info('Searching TheMovieDB for {}'.format(search_term))
if search_term[:2] == 'tt' and search_term[2:].isdigit():
movies = TheMovieDatabase._search_imdbid(search_term)
elif search_term[:5] == 'tmdb:' and search_term[5:].strip().isdigit():
movies = TheMovieDatabase._search_tmdbid(search_term[5:].strip())
if movies and 'status' in movies[0]:
# watcher thinks movie is already added when it has status, so we don't want status in search result
movies[0].pop('status')
else:
movies = TheMovieDatabase._search_title(search_term)
if not movies:
logging.info('Nothing found on TheMovieDatabase for {}'.format(search_term))
return []
if single:
return movies[0:1]
else:
return movies
@staticmethod
def _search_title(title):
''' Search TMDB for title
title (str): movie title
Title can include year ie Move Title 2017
Returns list of results
'''
logging.info('Searching TheMovieDB for title: {}.'.format(title))
title = Url.normalize(title)
url = 'https://api.themoviedb.org/3/search/movie?page=1&include_adult={}&'.format('true' if core.CONFIG['Search']['allowadult'] else 'false')
if len(title) > 4 and title[-4:].isdigit():
query = 'query={}&year={}'.format(title[:-5], title[-4:])
else:
query = 'query={}'.format(title)
url = url + query
logging.info('Searching TMDB {}'.format(url))
url = url + '&api_key={}'.format(_k(b'tmdb'))
TheMovieDatabase._use_token()
try:
results = json.loads(Url.open(url).text)
if results.get('success') == 'false':
return []
else:
return results['results'][:6]
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Error searching for title on TMDB.', exc_info=True)
return []
@staticmethod
def _search_imdbid(imdbid):
''' Search TMDB for imdb id #
imdbid (str): imdb id #
Returns list of results
'''
logging.info('Searching TheMovieDB for IMDB ID: {}.'.format(imdbid))
url = 'https://api.themoviedb.org/3/find/{}?language=en-US&external_source=imdb_id&append_to_response=alternative_titles,external_ids,release_dates'.format(imdbid)
logging.info('Searching TMDB {}'.format(url))
url = url + '&api_key={}'.format(_k(b'tmdb'))
TheMovieDatabase._use_token()
try:
results = json.loads(Url.open(url).text)
if results['movie_results'] == []:
return []
else:
response = results['movie_results'][0]
response['imdbid'] = imdbid
return [response]
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Error searching for IMDBID on TMDB.', exc_info=True)
return []
@staticmethod
def _search_tmdbid(tmdbid):
''' Search TMDB for tmdbid
tmdbid (str): themoviedatabase id #
Returns list of results
'''
logging.info('Searching TheMovieDB for TMDB ID: {}.'.format(tmdbid))
url = 'https://api.themoviedb.org/3/movie/{}?language=en-US&append_to_response=alternative_titles,external_ids,release_dates'.format(tmdbid)
logging.info('Searching TMDB {}'.format(url))
url = url + '&api_key={}'.format(_k(b'tmdb'))
TheMovieDatabase._use_token()
try:
response = Url.open(url)
if response.status_code != 200:
logging.warning('Unable to reach TMDB, error {}'.format(response.status_code))
return []
else:
results = json.loads(response.text)
results['imdbid'] = results.pop('imdb_id')
logging.warning('TMDB returned imdbid as {}'.format(results['imdbid']))
if results['imdbid'] == 'N/A' or results['imdbid'] == '':
logging.warning('TMDB did not have an IMDBid for this movie')
return []
else:
return [results]
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Error searching for TMDBID on TMDB.', exc_info=True)
return []
@staticmethod
def get_imdbid(tmdbid=None, title=None, year=''):
''' Gets imdbid from tmdbid or title and year
tmdbid (str): themoviedatabase id #
title (str): movie title
year (str/int): year of movie release
MUST supply either tmdbid or title. Year is optional with title, but results
are more reliable with it.
Returns str imdbid
'''
if not tmdbid and not title:
logging.warning('Neither tmdbid or title supplied. Unable to find imdbid.')
return ''
if not tmdbid:
title = Url.normalize(title)
year = Url.normalize(year)
url = 'https://api.themoviedb.org/3/search/movie?api_key={}&language=en-US&query={}&year={}&page=1&include_adult={}'.format(_k(b'tmdb'), title, year, 'true' if core.CONFIG['Search']['allowadult'] else 'false')
TheMovieDatabase._use_token()
try:
results = json.loads(Url.open(url).text)
results = results['results']
if results:
tmdbid = results[0]['id']
else:
return ''
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Error attempting to get TMDBID from TMDB.', exc_info=True)
return ''
url = 'https://api.themoviedb.org/3/movie/{}?api_key={}'.format(tmdbid, _k(b'tmdb'))
TheMovieDatabase._use_token()
try:
results = json.loads(Url.open(url).text)
return results.get('imdb_id')
except Exception as e:
logging.error('Error attempting to get IMDBID from TMDB.', exc_info=True)
return ''
@staticmethod
def get_category(cat, tmdbid=None):
''' get popular movies from TMDB
cat (str): category of movies to retrieve
tmdbid (str): tmdb id# to use for suggestions or similar
        tmdbid is required for cat='similar', otherwise it can be ignored.
        Gets the list of movies for the given cat (ie popular, now playing, coming soon, etc)
Returns list[dict]
'''
if cat == 'similar':
if tmdbid is None:
return []
url = 'https://api.themoviedb.org/3/movie/{}/similar?&language=en-US&page=1'.format(tmdbid)
else:
url = 'https://api.themoviedb.org/3/movie/{}?language=en-US&page=1'.format(cat)
url += '&api_key={}'.format(_k(b'tmdb'))
TheMovieDatabase._use_token()
try:
results = json.loads(Url.open(url).text)
if results.get('success') == 'false':
logging.warning('Bad request to TheMovieDatabase.')
return []
else:
return results['results']
except Exception as e:
logging.error('Unable to read {} movies from TheMovieDB.'.format(cat), exc_info=True)
return []
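# --- Editor's addition: a hedged sketch of the token-bucket throttle used by the class above,
# reduced to a stand-alone helper. Capacity and refill window are illustrative defaults,
# not TMDB's actual limits, and nothing below is used by Watcher itself.
class _SimpleTokenBucket(object):
    def __init__(self, capacity=30, refill_seconds=10):
        self.capacity = capacity
        self.refill_seconds = refill_seconds
        self.tokens = capacity
        self.last_fill = time()
    def take(self):
        ''' Block until a token is available, topping the bucket up after each refill window '''
        while True:
            if (time() - self.last_fill) > self.refill_seconds:
                self.tokens = self.capacity
                self.last_fill = time()
            if self.tokens > 0:
                self.tokens -= 1
                return
            sleep(0.3)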
class YouTube(object):
@staticmethod
def trailer(title_date):
''' Gets trailer embed ID from Youtube.
title_date (str): movie title and date ('Movie Title 2016')
Attempts to connect 3 times in case Youtube is down or not responding
Can fail if no response is received.
Returns str
'''
logging.info('Getting trailer url from YouTube for {}'.format(title_date))
search_term = Url.normalize((title_date + '+trailer'))
url = 'https://www.googleapis.com/youtube/v3/search?part=snippet&q={}&maxResults=1&key={}'.format(search_term, _k(b'youtube'))
tries = 0
while tries < 3:
try:
results = json.loads(Url.open(url).text)
return results['items'][0]['id']['videoId']
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
if tries == 2:
logging.error('Unable to get trailer from Youtube.', exc_info=True)
tries += 1
return ''
class Poster(object):
folder = os.path.join(core.POSTER_DIR)
if not os.path.exists(folder):
os.makedirs(folder)
@staticmethod
def save(imdbid, poster):
''' Saves poster locally
imdbid (str): imdb id #
poster (str): url of poster image.jpg
Saves poster as watcher/userdata/posters/[imdbid].jpg
Does not return
'''
logging.info('Downloading poster for {}.'.format(imdbid))
new_poster_path = os.path.join(Poster.folder, '{}.jpg'.format(imdbid))
if os.path.exists(new_poster_path):
logging.warning('{} already exists.'.format(new_poster_path))
return
else:
logging.info('Saving poster to {}'.format(new_poster_path))
try:
poster_bytes = Url.open(poster, stream=True).content
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Poster save_poster get', exc_info=True)
return
try:
with open(new_poster_path, 'wb') as output:
output.write(poster_bytes)
del poster_bytes
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Unable to save poster to disk.', exc_info=True)
return
logging.info('Poster saved to {}'.format(new_poster_path))
@staticmethod
def remove(imdbid):
''' Deletes poster from disk.
imdbid (str): imdb id #
Does not return
'''
logging.info('Removing poster for {}'.format(imdbid))
path = os.path.join(Poster.folder, '{}.jpg'.format(imdbid))
if os.path.exists(path):
os.remove(path)
else:
logging.warning('{} doesn\'t exist, cannot remove.'.format(path))
| 34.347945
| 221
| 0.579006
|
b300c59207eec14fb38e799fb46906cbb3460d7f
| 1,612
|
py
|
Python
|
project/tests/test__config.py
|
eghuro/crawlcheck-report
|
540592396e06340430e5c3baa7275fa002f8c367
|
[
"MIT"
] | 1
|
2016-09-20T09:07:34.000Z
|
2016-09-20T09:07:34.000Z
|
project/tests/test__config.py
|
eghuro/crawlcheck-report
|
540592396e06340430e5c3baa7275fa002f8c367
|
[
"MIT"
] | null | null | null |
project/tests/test__config.py
|
eghuro/crawlcheck-report
|
540592396e06340430e5c3baa7275fa002f8c367
|
[
"MIT"
] | 1
|
2016-09-20T09:07:37.000Z
|
2016-09-20T09:07:37.000Z
|
# project/server/tests/test_config.py
import unittest
from flask import current_app
from flask_testing import TestCase
from project.server import app
class TestDevelopmentConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.DevelopmentConfig')
return app
def test_app_is_development(self):
self.assertFalse(current_app.config['TESTING'])
self.assertTrue(app.config['DEBUG'] is True)
self.assertTrue(app.config['WTF_CSRF_ENABLED'] is False)
self.assertTrue(app.config['DEBUG_TB_ENABLED'] is True)
self.assertFalse(current_app is None)
class TestTestingConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.TestingConfig')
return app
def test_app_is_testing(self):
self.assertTrue(current_app.config['TESTING'])
self.assertTrue(app.config['DEBUG'] is True)
self.assertTrue(app.config['BCRYPT_LOG_ROUNDS'] == 4)
self.assertTrue(app.config['WTF_CSRF_ENABLED'] is False)
class TestProductionConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.ProductionConfig')
return app
def test_app_is_production(self):
self.assertFalse(current_app.config['TESTING'])
self.assertTrue(app.config['DEBUG'] is False)
self.assertTrue(app.config['DEBUG_TB_ENABLED'] is False)
self.assertTrue(app.config['WTF_CSRF_ENABLED'] is True)
self.assertTrue(app.config['BCRYPT_LOG_ROUNDS'] == 13)
if __name__ == '__main__':
unittest.main()
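# --- Editor's addition: a hedged usage note ---
# The three TestCase classes above differ only in which settings object create_app()
# loads through app.config.from_object(). Outside the test suite the same mechanism is
# commonly driven by an environment variable; the variable name below is an assumption,
# not something defined in this project.
#
#     import os
#     app.config.from_object(os.getenv("APP_SETTINGS", "project.server.config.DevelopmentConfig"))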
| 29.309091
| 73
| 0.709057
|
bfa7fd4f303e401470872f91a245e72d7069cc09
| 17,363
|
py
|
Python
|
examples/smbclient.py
|
black-security/impacket3
|
5b6145b3f511d4f7cddaba64cf150b109113afea
|
[
"Apache-1.1"
] | null | null | null |
examples/smbclient.py
|
black-security/impacket3
|
5b6145b3f511d4f7cddaba64cf150b109113afea
|
[
"Apache-1.1"
] | null | null | null |
examples/smbclient.py
|
black-security/impacket3
|
5b6145b3f511d4f7cddaba64cf150b109113afea
|
[
"Apache-1.1"
] | 1
|
2022-01-28T00:53:41.000Z
|
2022-01-28T00:53:41.000Z
|
#!/usr/bin/python
# Copyright (c) 2003-2015 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description: Mini shell using some of the SMB functionality of the library
#
# Author:
# Alberto Solino (@agsolino)
#
#
# Reference for:
# SMB DCE/RPC
#
import sys
import string
import time
import logging
from impacket.examples import logger
from impacket import smb, version, smb3, nt_errors
from impacket.dcerpc.v5 import samr, transport, srvs
from impacket.dcerpc.v5.dtypes import NULL
from impacket.smbconnection import *
import argparse
import ntpath
import cmd
import os
# If you wanna have readline like functionality in Windows, install pyreadline
try:
import pyreadline as readline
except ImportError:
import readline
class MiniImpacketShell(cmd.Cmd):
def __init__(self, smbClient):
cmd.Cmd.__init__(self)
self.prompt = '# '
self.smb = smbClient
self.username, self.password, self.domain, self.lmhash, self.nthash, self.aesKey, self.TGT, self.TGS = smbClient.getCredentials()
self.tid = None
self.intro = 'Type help for list of commands'
self.pwd = ''
self.share = None
self.loggedIn = True
self.completion = []
def emptyline(self):
pass
def precmd(self,line):
        # under Python 3 the command line is already unicode text, no decoding needed
        return line
def onecmd(self,s):
retVal = False
try:
retVal = cmd.Cmd.onecmd(self,s)
except Exception as e:
#import traceback
# print traceback.print_exc()
logging.error(e)
return retVal
def do_exit(self,line):
return True
def do_shell(self, line):
output = os.popen(line).read()
print(output)
self.last_output = output
def do_help(self,line):
print("""
open {host,port=445} - opens a SMB connection against the target host/port
login {domain/username,passwd} - logs into the current SMB connection, no parameters for NULL connection. If no password specified, it'll be prompted
kerberos_login {domain/username,passwd} - logs into the current SMB connection using Kerberos. If no password specified, it'll be prompted. Use the DNS resolvable domain name
login_hash {domain/username,lmhash:nthash} - logs into the current SMB connection using the password hashes
logoff - logs off
shares - list available shares
use {sharename} - connect to an specific share
cd {path} - changes the current directory to {path}
pwd - shows current remote directory
password - changes the user password, the new password will be prompted for input
ls {wildcard} - lists all the files in the current directory
rm {file} - removes the selected file
mkdir {dirname} - creates the directory under the current path
rmdir {dirname} - removes the directory under the current path
put {filename} - uploads the filename into the current path
get {filename} - downloads the filename from the current path
info - returns NetrServerInfo main results
who - returns the sessions currently connected at the target host (admin required)
close - closes the current SMB Session
exit - terminates the server process (and this session)
""")
def do_password(self, line):
if self.loggedIn is False:
logging.error("Not logged in")
return
from getpass import getpass
newPassword = getpass("New Password:")
rpctransport = transport.SMBTransport(self.smb.getRemoteHost(), filename=r'\samr', smb_connection=self.smb)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(samr.MSRPC_UUID_SAMR)
resp = samr.hSamrUnicodeChangePasswordUser2(dce, '\x00', self.username, self.password, newPassword, self.lmhash, self.nthash)
self.password = newPassword
self.lmhash = None
self.nthash = None
def do_open(self,line):
l = line.split(' ')
port = 445
if len(l) > 0:
host = l[0]
if len(l) > 1:
port = int(l[1])
if port == 139:
self.smb = SMBConnection('*SMBSERVER', host, sess_port=port)
else:
self.smb = SMBConnection(host, host, sess_port=port)
dialect = self.smb.getDialect()
if dialect == SMB_DIALECT:
logging.info("SMBv1 dialect used")
elif dialect == SMB2_DIALECT_002:
logging.info("SMBv2.0 dialect used")
elif dialect == SMB2_DIALECT_21:
logging.info("SMBv2.1 dialect used")
else:
logging.info("SMBv3.0 dialect used")
self.share = None
self.tid = None
self.pwd = ''
self.loggedIn = False
self.password = None
self.lmhash = None
self.nthash = None
self.username = None
def do_login(self,line):
if self.smb is None:
logging.error("No connection open")
return
l = line.split(' ')
username = ''
password = ''
domain = ''
if len(l) > 0:
username = l[0]
if len(l) > 1:
password = l[1]
if username.find('/') > 0:
domain, username = username.split('/')
if password == '' and username != '':
from getpass import getpass
password = getpass("Password:")
self.smb.login(username, password, domain=domain)
self.password = password
self.username = username
if self.smb.isGuestSession() > 0:
logging.info("GUEST Session Granted")
else:
logging.info("USER Session Granted")
self.loggedIn = True
def do_kerberos_login(self,line):
if self.smb is None:
logging.error("No connection open")
return
l = line.split(' ')
username = ''
password = ''
domain = ''
if len(l) > 0:
username = l[0]
if len(l) > 1:
password = l[1]
if username.find('/') > 0:
domain, username = username.split('/')
if domain == '':
logging.error("Domain must be specified for Kerberos login")
return
if password == '' and username != '':
from getpass import getpass
password = getpass("Password:")
self.smb.kerberosLogin(username, password, domain=domain)
self.password = password
self.username = username
if self.smb.isGuestSession() > 0:
logging.info("GUEST Session Granted")
else:
logging.info("USER Session Granted")
self.loggedIn = True
def do_login_hash(self,line):
if self.smb is None:
logging.error("No connection open")
return
l = line.split(' ')
domain = ''
if len(l) > 0:
username = l[0]
if len(l) > 1:
hashes = l[1]
else:
logging.error("Hashes needed. Format is lmhash:nthash")
return
if username.find('/') > 0:
domain, username = username.split('/')
lmhash, nthash = hashes.split(':')
self.smb.login(username, '', domain,lmhash=lmhash, nthash=nthash)
self.username = username
self.lmhash = lmhash
self.nthash = nthash
if self.smb.isGuestSession() > 0:
logging.info("GUEST Session Granted")
else:
logging.info("USER Session Granted")
self.loggedIn = True
def do_logoff(self, line):
if self.smb is None:
logging.error("No connection open")
return
self.smb.logoff()
del(self.smb)
self.share = None
self.smb = None
self.tid = None
self.pwd = ''
self.loggedIn = False
self.password = None
self.lmhash = None
self.nthash = None
self.username = None
def do_info(self, line):
if self.loggedIn is False:
logging.error("Not logged in")
return
rpctransport = transport.SMBTransport(self.smb.getRemoteHost(), filename=r'\srvsvc', smb_connection=self.smb)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(srvs.MSRPC_UUID_SRVS)
resp = srvs.hNetrServerGetInfo(dce, 102)
print("Version Major: %d" % resp['InfoStruct']['ServerInfo102']['sv102_version_major'])
print("Version Minor: %d" % resp['InfoStruct']['ServerInfo102']['sv102_version_minor'])
print("Server Name: %s" % resp['InfoStruct']['ServerInfo102']['sv102_name'])
print("Server Comment: %s" % resp['InfoStruct']['ServerInfo102']['sv102_comment'])
print("Server UserPath: %s" % resp['InfoStruct']['ServerInfo102']['sv102_userpath'])
print("Simultaneous Users: %d" % resp['InfoStruct']['ServerInfo102']['sv102_users'])
def do_who(self, line):
if self.loggedIn is False:
logging.error("Not logged in")
return
rpctransport = transport.SMBTransport(self.smb.getRemoteHost(), filename=r'\srvsvc', smb_connection=self.smb)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(srvs.MSRPC_UUID_SRVS)
resp = srvs.hNetrSessionEnum(dce, NULL, NULL, 10)
for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']:
print("host: %15s, user: %5s, active: %5d, idle: %5d" % (session['sesi10_cname'][:-1], session['sesi10_username'][:-1], session['sesi10_time'], session['sesi10_idle_time']))
def do_shares(self, line):
if self.loggedIn is False:
logging.error("Not logged in")
return
resp = self.smb.listShares()
for i in range(len(resp)):
print(resp[i]['shi1_netname'][:-1])
def do_use(self,line):
if self.loggedIn is False:
logging.error("Not logged in")
return
self.share = line
self.tid = self.smb.connectTree(line)
self.pwd = '\\'
self.do_ls('', False)
def complete_cd(self, text, line, begidx, endidx):
return self.complete_get(text, line, begidx, endidx, include=2)
def do_cd(self, line):
if self.tid is None:
logging.error("No share selected")
return
        p = line.replace('/', '\\')
oldpwd = self.pwd
if p[0] == '\\':
self.pwd = line
else:
self.pwd = ntpath.join(self.pwd, line)
self.pwd = ntpath.normpath(self.pwd)
# Let's try to open the directory to see if it's valid
try:
fid = self.smb.openFile(self.tid, self.pwd)
self.smb.closeFile(self.tid,fid)
self.pwd = oldpwd
logging.error("Invalid directory")
except SessionError as e:
if e.getErrorCode() == nt_errors.STATUS_FILE_IS_A_DIRECTORY:
pass
else:
self.pwd = oldpwd
raise
except Exception as e:
self.pwd = oldpwd
raise
def do_pwd(self,line):
if self.loggedIn is False:
logging.error("Not logged in")
return
print(self.pwd)
def do_ls(self, wildcard, display=True):
if self.loggedIn is False:
logging.error("Not logged in")
return
if self.tid is None:
logging.error("No share selected")
return
if wildcard == '':
pwd = ntpath.join(self.pwd,'*')
else:
pwd = ntpath.join(self.pwd, wildcard)
self.completion = []
        pwd = pwd.replace('/', '\\')
pwd = ntpath.normpath(pwd)
for f in self.smb.listPath(self.share, pwd):
if display is True:
print("%crw-rw-rw- %10d %s %s" % ('d' if f.is_directory() > 0 else '-', f.get_filesize(), time.ctime(float(f.get_mtime_epoch())),f.get_longname()))
self.completion.append((f.get_longname(),f.is_directory()))
def do_rm(self, filename):
if self.tid is None:
logging.error("No share selected")
return
f = ntpath.join(self.pwd, filename)
        file = f.replace('/', '\\')
self.smb.deleteFile(self.share, file)
def do_mkdir(self, path):
if self.tid is None:
logging.error("No share selected")
return
p = ntpath.join(self.pwd, path)
        pathname = p.replace('/', '\\')
self.smb.createDirectory(self.share,pathname)
def do_rmdir(self, path):
if self.tid is None:
logging.error("No share selected")
return
p = ntpath.join(self.pwd, path)
        pathname = p.replace('/', '\\')
self.smb.deleteDirectory(self.share, pathname)
def do_put(self, pathname):
if self.tid is None:
logging.error("No share selected")
return
src_path = pathname
dst_name = os.path.basename(src_path)
fh = open(pathname, 'rb')
f = ntpath.join(self.pwd,dst_name)
        finalpath = f.replace('/', '\\')
self.smb.putFile(self.share, finalpath, fh.read)
fh.close()
def complete_get(self, text, line, begidx, endidx, include=1):
# include means
# 1 just files
# 2 just directories
        p = line.replace('/', '\\')
if p.find('\\') < 0:
items = []
if include == 1:
mask = 0
else:
mask = 0x010
for i in self.completion:
if i[1] == mask:
items.append(i[0])
if text:
return [
item for item in items
if item.upper().startswith(text.upper())
]
else:
return items
def do_get(self, filename):
if self.tid is None:
logging.error("No share selected")
return
        filename = filename.replace('/', '\\')
fh = open(ntpath.basename(filename),'wb')
pathname = ntpath.join(self.pwd,filename)
try:
self.smb.getFile(self.share, pathname, fh.write)
except:
fh.close()
os.remove(filename)
raise
fh.close()
def do_close(self, line):
self.do_logoff(line)
def main():
print(version.BANNER)
parser = argparse.ArgumentParser(add_help=True, description="SMB client implementation.")
parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
parser.add_argument('-file', type=argparse.FileType('r'), help='input file with commands to execute in the mini shell')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar="LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file (KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar="hex key", help='AES key to use for Kerberos Authentication (128 or 256 bits)')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
import re
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(options.target).groups('')
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
if options.hashes is not None:
lmhash, nthash = options.hashes.split(':')
else:
lmhash = ''
nthash = ''
try:
smbClient = SMBConnection(address, address)
if options.k is True:
smbClient.kerberosLogin(username, password, domain, lmhash, nthash, options.aesKey)
else:
smbClient.login(username, password, domain, lmhash, nthash)
shell = MiniImpacketShell(smbClient)
if options.file is not None:
logging.info("Executing commands from %s" % options.file.name)
for line in options.file.readlines():
if line[0] != '#':
print("# %s" % line, end=' ')
shell.onecmd(line)
else:
print(line, end=' ')
else:
shell.cmdloop()
except Exception as e:
#import traceback
# print traceback.print_exc()
logging.error(str(e))
if __name__ == "__main__":
main()
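# --- Editor's addition: hedged example invocations ---
# Derived from the argparse definition above (target format:
# [[domain/]username[:password]@]<targetName or address>); host names, credentials and
# hashes below are placeholders.
#
#     python smbclient.py CONTOSO/alice:S3cret@192.168.1.10
#     python smbclient.py CONTOSO/alice@fileserver -hashes aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0
#     python smbclient.py alice@fileserver -file commands.txt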
| 34.045098
| 251
| 0.587802
|
8caa963c4514b49830494944059e05dd66e60d16
| 9,657
|
py
|
Python
|
wntr/tests/test_network_elements.py
|
algchyhao/WNTR
|
dd4db188a8641a4da16cf80a1557c908fa48c17d
|
[
"BSD-3-Clause"
] | null | null | null |
wntr/tests/test_network_elements.py
|
algchyhao/WNTR
|
dd4db188a8641a4da16cf80a1557c908fa48c17d
|
[
"BSD-3-Clause"
] | null | null | null |
wntr/tests/test_network_elements.py
|
algchyhao/WNTR
|
dd4db188a8641a4da16cf80a1557c908fa48c17d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test the wntr.network.elements classes
"""
from __future__ import print_function
import nose.tools
from nose import SkipTest
from nose.tools import *
from os.path import abspath, dirname, join
import numpy as np
import wntr
import wntr.network.elements as elements
from wntr.network.options import TimeOptions
testdir = dirname(abspath(str(__file__)))
datadir = join(testdir,'networks_for_testing')
net1dir = join(testdir,'..','..','examples','networks')
def test_Curve():
pts1 = [[3,5]]
pts2 = [[3, 6], [7, 2], [10, 0]]
pts3 = [[3, 6], [7, 2], [10, 1]]
expected_str = "<Curve: 'curve2', curve_type='HEAD', points=[[3, 6], [7, 2], [10, 0]]>"
# Create the curves
curve1 = elements.Curve('curve1', 'PUMP', pts1)
curve2a = elements.Curve('curve2', 'HEAD', pts2)
curve2b = elements.Curve('curve2', 'HEAD', pts3)
curve2c = elements.Curve('curve2', 'HEAD', pts3)
# Test that the assignments are working
nose.tools.assert_list_equal(curve2b.points, pts3)
nose.tools.assert_equal(curve1.num_points, 1)
nose.tools.assert_equal(len(curve2c), 3)
# Testing __eq__
nose.tools.assert_not_equal(curve1, curve2a)
nose.tools.assert_not_equal(curve2a, curve2b)
nose.tools.assert_equal(curve2b, curve2c)
# testing __getitem__ and __getslice__
nose.tools.assert_list_equal(curve2a[0], [3,6])
nose.tools.assert_list_equal(curve2a[:2], [[3, 6], [7, 2]])
# verify that the points are being deep copied
nose.tools.assert_not_equal(id(curve2b.points), id(curve2c.points))
def test_Pattern():
pattern_points1 = [1, 2, 3, 4, 3, 2, 1]
pattern_points2 = [1.0, 1.2, 1.0 ]
pattern_points3 = 3.2
# test constant pattern creation
timing1 = TimeOptions()
timing1.pattern_start = 0
timing1.pattern_timestep = 1
timing2 = TimeOptions()
timing2.pattern_start = 0
timing2.pattern_timestep = 5
pattern1a = elements.Pattern('constant', multipliers=pattern_points3, time_options=timing1)
pattern1b = elements.Pattern('constant', multipliers=[pattern_points3], time_options=(0,1))
nose.tools.assert_list_equal(pattern1a.multipliers.tolist(), [pattern_points3])
nose.tools.assert_true(np.all(pattern1a.multipliers == pattern1b.multipliers))
nose.tools.assert_false(id(pattern1a.multipliers) == id(pattern1b.multipliers))
nose.tools.assert_equal(pattern1a.time_options, pattern1b.time_options)
    # test the multipliers setter
pattern2a = elements.Pattern('oops', multipliers=pattern_points3, time_options=(0,5))
pattern2b = elements.Pattern('oops', multipliers=pattern_points1, time_options=timing2)
pattern2a.multipliers = pattern_points1
nose.tools.assert_equal(pattern2a.time_options, pattern2b.time_options)
# test pattern evaluations
expected_value = pattern_points1[2]
nose.tools.assert_equal(pattern2a[2], expected_value)
nose.tools.assert_equal(pattern2b.at(10), expected_value)
nose.tools.assert_equal(pattern2b.at(12.5), expected_value)
nose.tools.assert_equal(pattern2b(14), expected_value)
nose.tools.assert_equal(pattern2b(9*5), expected_value)
nose.tools.assert_not_equal(pattern2b.at(15), expected_value)
pattern3 = elements.Pattern('nowrap', multipliers=pattern_points2, time_options=(0, 100), wrap=False)
nose.tools.assert_equal(pattern3[5], 0.0)
nose.tools.assert_equal(pattern3[-39], 0.0)
nose.tools.assert_equal(pattern3(-39), 0.0)
nose.tools.assert_equal(pattern3.at(50), 1.0)
pattern4 = elements.Pattern('constant')
nose.tools.assert_equal(len(pattern4), 0)
nose.tools.assert_equal(pattern4(492), 1.0)
pattern5a = elements.Pattern('binary', [0.,0.,1.,1.,1.,1.,0.,0.,0.], time_options=(0, 1), wrap=False)
pattern5b = elements.Pattern.binary_pattern('binary', step_size=1, start_time=2, end_time=6, duration=9)
nose.tools.assert_false(pattern5a.__eq__(pattern5b))
nose.tools.assert_true(np.all(np.abs(pattern5a.multipliers - pattern5b.multipliers)<1.0e-10))
def test_TimeSeries():
wn = wntr.network.WaterNetworkModel()
pattern_points2 = [1.0, 1.2, 1.0 ]
wn.add_pattern('oops', pattern_points2)
pattern2 = wn.get_pattern('oops')
pattern5 = elements.Pattern.binary_pattern('binary', step_size=1, start_time=2, end_time=6, duration=9)
wn.add_pattern('binary', pattern5)
base1 = 2.0
# test constructor and setters, getters
tvv1 = elements.TimeSeries(wn.patterns, base1, None, None)
tvv2 = elements.TimeSeries(wn.patterns, base1, 'oops', 'tvv2')
nose.tools.assert_raises(ValueError, elements.TimeSeries, *('A', None, None))
nose.tools.assert_raises(ValueError, elements.TimeSeries, *(1.0, 'A', None))
nose.tools.assert_equals(tvv1._base, base1)
nose.tools.assert_equal(tvv1.base_value, tvv1._base)
nose.tools.assert_equals(tvv1.pattern_name, None)
nose.tools.assert_equals(tvv1.pattern, None)
nose.tools.assert_equals(tvv1.category, None)
tvv1.base_value = 3.0
nose.tools.assert_equals(tvv1.base_value, 3.0)
tvv1.pattern_name = 'binary'
nose.tools.assert_equals(tvv1.pattern_name, 'binary')
tvv1.category ='binary'
nose.tools.assert_equals(tvv1.category, 'binary')
# Test getitem
print(tvv1)
print(tvv2, pattern2)
nose.tools.assert_equals(tvv1.at(1), 0.0)
nose.tools.assert_equals(tvv1.at(7202), 3.0)
nose.tools.assert_equals(tvv2.at(1), 2.0)
print(tvv2, pattern2.time_options)
nose.tools.assert_equals(tvv2.at(3602), 2.4)
price1 = elements.TimeSeries(wn.patterns, 35.0, None)
price2 = elements.TimeSeries(wn.patterns, 35.0, None)
nose.tools.assert_equal(price1, price2)
nose.tools.assert_equal(price1.base_value, 35.0)
speed1 = elements.TimeSeries(wn.patterns, 35.0, pattern5)
speed2 = elements.TimeSeries(wn.patterns, 35.0, pattern5)
nose.tools.assert_equal(speed1, speed2)
nose.tools.assert_equal(speed1.base_value, 35.0)
head1 = elements.TimeSeries(wn.patterns, 35.0, pattern2)
head2 = elements.TimeSeries(wn.patterns, 35.0, pattern2)
nose.tools.assert_equal(head1, head2)
nose.tools.assert_equal(head1.base_value, 35.0)
demand1 = elements.TimeSeries(wn.patterns, 1.35, pattern2)
demand2 = elements.TimeSeries(wn.patterns, 1.35, pattern2)
nose.tools.assert_equal(demand1, demand2)
nose.tools.assert_equal(demand1.base_value, 1.35)
# expected_values1 = np.array([1.35, 1.62, 1.35, 1.35, 1.62])
# demand_values1 = demand2.get_values(0, 40, 10)
# nose.tools.assert_true(np.all(np.abs(expected_values1-demand_values1)<1.0e-10))
# expected_values1 = np.array([1.35, 1.35, 1.62, 1.62, 1.35, 1.35, 1.35, 1.35, 1.62])
# demand_values1 = demand2.get_values(0, 40, 5)
# nose.tools.assert_true(np.all(np.abs(expected_values1-demand_values1)<1.0e-10))
#
# source1 = elements.Source('source1', 'NODE-1131', 'CONCEN', 1000.0, pattern5)
# source2 = elements.Source('source1', 'NODE-1131', 'CONCEN', 1000.0, pattern5)
# nose.tools.assert_equal(source1, source2)
# nose.tools.assert_equal(source1.strength_timeseries.base_value, 1000.0)
def test_Demands():
wn = wntr.network.WaterNetworkModel()
pattern_points1 = [0.5, 1.0, 0.4, 0.2 ]
pattern1 = elements.Pattern('1', multipliers=pattern_points1, time_options=(0,10))
pattern_points2 = [1.0, 1.2, 1.0 ]
pattern2 = elements.Pattern('2', multipliers=pattern_points2, time_options=(0,10))
demand1 = elements.TimeSeries(wn.patterns, 2.5, pattern1, '_base_demand')
demand2 = elements.TimeSeries(wn.patterns, 1.0, pattern2, 'residential')
demand3 = elements.TimeSeries(wn.patterns, 0.8, pattern2, 'residential')
expected1 = 2.5 * np.array(pattern_points1*3)
expected2 = 1.0 * np.array(pattern_points2*4)
expected3 = 0.8 * np.array(pattern_points2*4)
expectedtotal = expected1 + expected2 + expected3
expectedresidential = expected2 + expected3
demandlist1 = elements.Demands(wn.patterns, demand1, demand2, demand3 )
demandlist2 = elements.Demands(wn.patterns)
demandlist2.append(demand1)
demandlist2.append(demand1)
demandlist2[1] = demand2
demandlist2.append((0.8, pattern2, 'residential'))
nose.tools.assert_list_equal(list(demandlist1), list(demandlist2))
demandlist2.extend(demandlist1)
nose.tools.assert_equal(len(demandlist1), 3)
nose.tools.assert_equal(len(demandlist2), 6)
del demandlist2[3]
del demandlist2[3]
del demandlist2[3]
del demandlist2[0]
demandlist2.insert(0, demand1)
nose.tools.assert_list_equal(list(demandlist1), list(demandlist2))
demandlist2.clear()
nose.tools.assert_equal(len(demandlist2), 0)
nose.tools.assert_false(demandlist2)
raise SkipTest
nose.tools.assert_equal(demandlist1.at(5), expectedtotal[0])
nose.tools.assert_equal(demandlist1.at(13), expectedtotal[1])
nose.tools.assert_equal(demandlist1.at(13, 'residential'), expectedresidential[1])
nose.tools.assert_true(np.all(np.abs(demandlist1.get_values(0,110,10)-expectedtotal)<1.0e-10))
nose.tools.assert_list_equal(demandlist1.base_demand_list(), [2.5, 1.0, 0.8])
nose.tools.assert_list_equal(demandlist1.base_demand_list('_base_demand'), [2.5])
nose.tools.assert_list_equal(demandlist1.pattern_list(), [pattern1, pattern2, pattern2])
nose.tools.assert_list_equal(demandlist1.pattern_list(category='residential'), [pattern2, pattern2])
nose.tools.assert_list_equal(demandlist1.category_list(), ['_base_demand','residential','residential'])
def test_Enums():
pass
if __name__ == '__main__':
test_Demands()
| 44.916279
| 111
| 0.712954
|
808a01cc5b4091223012d09c7131724522f9ba4d
| 955
|
py
|
Python
|
sympy/ntheory/__init__.py
|
ovolve/sympy
|
0a15782f20505673466b940454b33b8014a25c13
|
[
"BSD-3-Clause"
] | 1
|
2016-02-22T22:46:50.000Z
|
2016-02-22T22:46:50.000Z
|
sympy/ntheory/__init__.py
|
ovolve/sympy
|
0a15782f20505673466b940454b33b8014a25c13
|
[
"BSD-3-Clause"
] | 7
|
2017-05-01T14:15:32.000Z
|
2017-09-06T20:44:24.000Z
|
sympy/ntheory/__init__.py
|
ovolve/sympy
|
0a15782f20505673466b940454b33b8014a25c13
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T13:36:07.000Z
|
2021-05-26T13:36:07.000Z
|
"""
Number theory module (primes, etc)
"""
from .generate import nextprime, prevprime, prime, primepi, primerange, \
randprime, Sieve, sieve, primorial, cycle_length
from .primetest import isprime
from .factor_ import divisors, factorint, multiplicity, perfect_power, \
pollard_pm1, pollard_rho, primefactors, totient, trailing, divisor_count, \
divisor_sigma, factorrat
from .partitions_ import npartitions
from .residue_ntheory import is_primitive_root, is_quad_residue, \
legendre_symbol, jacobi_symbol, n_order, sqrt_mod, quadratic_residues, \
primitive_root, nthroot_mod, is_nthpow_residue, sqrt_mod_iter, mobius
from .multinomial import binomial_coefficients, binomial_coefficients_list, \
multinomial_coefficients
from .continued_fraction import continued_fraction_periodic, \
continued_fraction_iterator, continued_fraction_reduce, \
continued_fraction_convergents
from .egyptian_fraction import egyptian_fraction
| 45.47619
| 79
| 0.81466
|
6fdbd369f85a99e66367a0cc7789173c63d10912
| 77
|
py
|
Python
|
dynamic_stock_model/tests/__init__.py
|
thomasgibon/pyDSM
|
c3d1d16af5808051c5b8028d6a12ffe458e6d5ba
|
[
"BSD-3-Clause"
] | 16
|
2016-04-11T08:06:22.000Z
|
2021-07-16T09:20:55.000Z
|
dynamic_stock_model/tests/__init__.py
|
thomasgibon/pyDSM
|
c3d1d16af5808051c5b8028d6a12ffe458e6d5ba
|
[
"BSD-3-Clause"
] | 2
|
2016-04-12T19:39:53.000Z
|
2016-08-17T14:30:12.000Z
|
dynamic_stock_model/tests/__init__.py
|
thomasgibon/pyDSM
|
c3d1d16af5808051c5b8028d6a12ffe458e6d5ba
|
[
"BSD-3-Clause"
] | 9
|
2015-07-22T07:28:02.000Z
|
2020-09-11T14:08:07.000Z
|
# -*- coding: utf-8 -*-
from .test_known_results import KnownResultsTestCase
| 25.666667
| 52
| 0.753247
|
de811e8cc4640f49ad089b284ba422d92fd573d4
| 30,178
|
py
|
Python
|
BIT_OpenDomain_QA/Mrc/utils_duqa.py
|
rwei1218/transformers
|
511e100c650b3f942c432d8f71eee3ea1c0005a8
|
[
"Apache-2.0"
] | null | null | null |
BIT_OpenDomain_QA/Mrc/utils_duqa.py
|
rwei1218/transformers
|
511e100c650b3f942c432d8f71eee3ea1c0005a8
|
[
"Apache-2.0"
] | null | null | null |
BIT_OpenDomain_QA/Mrc/utils_duqa.py
|
rwei1218/transformers
|
511e100c650b3f942c432d8f71eee3ea1c0005a8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Load Duqa dataset. """
from __future__ import absolute_import, division, print_function
import collections
import json
import logging
import math
from io import open
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
class BaiduExample(object):
"""
A single training/test example for the baidu dataset
    Includes the ID, the question, the tokenized document, the fake answer,
    and the answer span start/end positions (SP, EP).
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
        if self.end_position:
s += ", end_position: %d" % (self.end_position)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
def read_baidu_examples(input_file, is_training):
"""Read a baidu json file into a list of BaiduExample."""
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
with open(input_file, "r", encoding='utf-8') as reader:
examples = []
for line in reader:
example = json.loads(line)
qas_id = example['question_id']
question_text = example['question']
context_tokens = example['doc_tokens']
start_position = None
end_position = None
orig_answer_text = None
            # If we are not training, the data should only contain the question and
            # the document, so the three fields above stay None. If we are training:
if is_training:
orig_answer_text = example['fake_answer'][0]
start_position = int(example['answer_span'][0])
end_position = int(example['answer_span'][1])
                # Check that the given fake answer can actually be found in the
                # document; skip the example if it cannot.
actual_text = "".join(context_tokens[start_position:(end_position+1)])
cleaned_answer_text = orig_answer_text
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
per_example = BaiduExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=context_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
)
examples.append(per_example)
return examples
def read_baidu_examples_pred(raw_data, is_training):
"""直接从[dir, dir...]读取数据"""
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for example in raw_data:
        # seg para is the word-segmented (tokenized) document text
qas_id = example['question_id']
question_text = example['question']
context_tokens = example['doc_tokens']
start_position = None
end_position = None
orig_answer_text = None
        # If we are not training, the data should only contain the question and
        # the document, so the three fields above stay None. If we are training:
if is_training:
orig_answer_text = example['fake_answer'][0]
start_position = int(example['answer_span'][0])
end_position = int(example['answer_span'][1])
            # Check that the given fake answer can actually be found in the
            # document; skip the example if it cannot.
actual_text = "".join(
context_tokens[start_position:(end_position + 1)])
cleaned_answer_text = orig_answer_text
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
per_example = BaiduExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=context_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
)
examples.append(per_example)
return examples
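# Illustrative sketch added for clarity (not part of the original module): the two
# readers above expect every raw example to provide the keys they access --
# 'question_id', 'question', 'doc_tokens', 'fake_answer' and 'answer_span'.
# The concrete values below are invented placeholders, shown only to make the
# expected record shape explicit.
def _example_raw_record():
    """Return a minimal record shaped like the input of read_baidu_examples_pred."""
    return {
        'question_id': 0,
        'question': 'example question',
        'doc_tokens': ['tok1', 'tok2', 'tok3'],
        'fake_answer': ['tok2'],       # list; only the first element is used
        'answer_span': [1, 1],         # [start, end] indices into doc_tokens
    }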
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
if (example.start_position < doc_start or
example.end_position < doc_start or
example.start_position > doc_end or example.end_position > doc_end):
continue
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if example_index < 10:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position))
unique_id += 1
return features
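# Illustrative sketch added for clarity (not part of the original module): the
# sliding-window loop above chops `all_doc_tokens` into overlapping `_DocSpan`s.
# The helper below reproduces just that loop; with 10 sub-tokens,
# max_tokens_for_doc=4 and doc_stride=3 it yields (0, 4), (3, 4), (6, 4).
def _sliding_window_sketch(num_tokens=10, max_tokens_for_doc=4, doc_stride=3):
    """Reproduce the doc_span computation from convert_examples_to_features."""
    spans = []
    start_offset = 0
    while start_offset < num_tokens:
        length = min(num_tokens - start_offset, max_tokens_for_doc)
        spans.append((start_offset, length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return spans  # [(0, 4), (3, 4), (6, 4)] for the default arguments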
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
    # Because the WordPiece tokenizer splits words into sub-tokens, the
    # corresponding answer span has to be adjusted to the new token boundaries.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
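# Illustrative sketch added for clarity (hypothetical values, not part of the
# original module): with a plain whitespace "tokenizer" standing in for the real
# WordPiece tokenizer, an initial span covering '( 1895 - 1943 ) .' is shrunk by
# _improve_answer_span to the single token '1895' at indices (1, 1).
def _improve_answer_span_sketch():
    """Tiny demonstration of _improve_answer_span with a whitespace tokenizer."""
    class _WhitespaceTok(object):
        def tokenize(self, text):
            return text.split()
    doc_tokens = ['(', '1895', '-', '1943', ')', '.']
    return _improve_answer_span(doc_tokens, 0, 5, _WhitespaceTok(), '1895')  # (1, 1)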
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
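# Worked example added for clarity (not part of the original module): it recomputes
# the 'bought' example from the comment in _check_is_max_context. Token position 7
# ('bought') scores min(4, 0) + 0.01 * 5 = 0.05 in span B and min(1, 3) + 0.01 * 5
# = 1.05 in span C, so only span C is treated as the max-context span.
def _max_context_sketch():
    """Return the max-context flags for token position 7 in spans A, B and C."""
    _Span = collections.namedtuple("DocSpan", ["start", "length"])
    spans = [_Span(0, 5), _Span(3, 5), _Span(6, 5)]
    return [_check_is_max_context(spans, idx, 7) for idx in range(3)]  # [False, False, True]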
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, verbose_logging):
"""Write final predictions to the json file."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4,ensure_ascii=False) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4,ensure_ascii=False) + "\n")
def convert_output(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, verbose_logging):
"""TBD"""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
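# Illustrative sketch added for clarity (not part of the original module): it replays
# the 'Steve Smith' example from the comments above. Per that example the call is
# expected to return "Steve Smith" -- the prediction projected back onto the original
# casing without the trailing "'s" -- although the exact output depends on
# BasicTokenizer, so treat this as a sketch of the intent rather than a guarantee.
def _get_final_text_sketch():
    """Project the lower-cased prediction back onto the original text."""
    return get_final_text("steve smith", "Steve Smith's", do_lower_case=True)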
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
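# Worked example added for clarity (not part of the original module): the function
# above subtracts the maximum score before exponentiating for numerical stability,
# so _compute_softmax([1.0, 2.0]) works with exp(-1.0) and exp(0.0) and returns
# probabilities of roughly [0.269, 0.731], which sum to 1.
def _softmax_sketch():
    """Tiny demonstration of _compute_softmax on two raw logits."""
    return _compute_softmax([1.0, 2.0])  # approximately [0.2689, 0.7311]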
| 39.448366
| 92
| 0.595666
|
d0709d3f7d06fcb04b1abb84761fe8380da1e0e9
| 72,208
|
py
|
Python
|
tests/test_manager.py
|
pedrokiefer/rpaas
|
7e2285ef4fbd3959109eead15c82d8ff3accdc94
|
[
"BSD-3-Clause"
] | 39
|
2015-01-22T14:06:01.000Z
|
2021-03-26T15:35:24.000Z
|
tests/test_manager.py
|
pedrokiefer/rpaas
|
7e2285ef4fbd3959109eead15c82d8ff3accdc94
|
[
"BSD-3-Clause"
] | 31
|
2015-01-27T12:23:38.000Z
|
2018-11-05T12:50:58.000Z
|
tests/test_manager.py
|
pedrokiefer/rpaas
|
7e2285ef4fbd3959109eead15c82d8ff3accdc94
|
[
"BSD-3-Clause"
] | 21
|
2015-06-16T20:42:37.000Z
|
2021-08-08T22:12:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 rpaas authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import copy
import consul
import unittest
import os
import mock
import rpaas.manager
from rpaas.manager import Manager, ScaleError, QuotaExceededError
from rpaas import tasks, storage, nginx
from rpaas.consul_manager import InstanceAlreadySwappedError, CertificateNotFoundError
tasks.app.conf.CELERY_ALWAYS_EAGER = True
class ManagerTestCase(unittest.TestCase):
def setUp(self):
self.master_token = "rpaas-test"
os.environ["MONGO_DATABASE"] = "host_manager_test"
os.environ.setdefault("CONSUL_HOST", "127.0.0.1")
os.environ.setdefault("CONSUL_TOKEN", self.master_token)
os.environ.setdefault("RPAAS_SERVICE_NAME", "test-suite-rpaas")
self.storage = storage.MongoDBStorage()
self.consul = consul.Consul(token=self.master_token)
self.consul.kv.delete("test-suite-rpaas", recurse=True)
colls = self.storage.db.collection_names(False)
for coll in colls:
self.storage.db.drop_collection(coll)
plan = {"_id": "small",
"description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}}
self.plan = copy.deepcopy(plan)
self.plan["name"] = plan["_id"]
del self.plan["_id"]
self.storage.db[self.storage.plans_collection].insert(plan)
flavor = {"_id": "vanilla",
"description": "nginx 1.10",
"config": {"nginx_version": "1.10"}}
self.flavor = copy.deepcopy(flavor)
self.flavor["name"] = flavor["_id"]
del self.flavor["_id"]
self.storage.db[self.storage.flavors_collection].insert(flavor)
self.lb_patcher = mock.patch("rpaas.tasks.LoadBalancer")
self.host_patcher = mock.patch("rpaas.tasks.Host")
self.LoadBalancer = self.lb_patcher.start()
self.Host = self.host_patcher.start()
self.config = {
"RPAAS_SERVICE_NAME": "test-suite-rpaas",
"HOST_MANAGER": "my-host-manager",
"LB_MANAGER": "my-lb-manager",
"serviceofferingid": "abcdef123459",
"CONSUL_HOST": "127.0.0.1",
"CONSUL_TOKEN": "rpaas-test",
}
self.maxDiff = None
def tearDown(self):
self.lb_patcher.stop()
self.host_patcher.stop()
os.environ['CHECK_ACL_API'] = "0"
@mock.patch("rpaas.tasks.nginx")
def test_new_instance(self, nginx):
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
lb.dsr = False
manager.new_instance("x")
host = self.Host.create.return_value
config = copy.deepcopy(self.config)
config["HOST_TAGS"] = "rpaas_service:test-suite-rpaas,rpaas_instance:x,consul_token:abc-123"
self.Host.create.assert_called_with("my-host-manager", "x", config)
self.LoadBalancer.create.assert_called_with("my-lb-manager", "x", config)
lb.add_host.assert_called_with(host)
self.assertEquals(self.storage.find_task("x").count(), 0)
nginx.Nginx.assert_called_once_with(config)
nginx_manager = nginx.Nginx.return_value
nginx_manager.wait_healthcheck.assert_called_once_with(host.dns_name, timeout=600)
manager.consul_manager.write_healthcheck.assert_called_once_with("x")
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_host_create_fail_and_raises(self, nginx):
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
self.Host.create.side_effect = Exception("Host create failure")
host = self.Host.create.return_value
manager.new_instance("x")
lb.add_host.assert_not_called()
lb.destroy.assert_not_called()
host.destroy.assert_not_called()
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_host_create_fail_and_rollback(self, nginx):
config = copy.deepcopy(self.config)
config["RPAAS_ROLLBACK_ON_ERROR"] = "1"
manager = Manager(config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
self.Host.create.side_effect = Exception("Host create failure")
host = self.Host.create.return_value
manager.new_instance("x")
lb.assert_not_called()
lb.add_host.assert_not_called()
lb.destroy.assert_called_once()
host.destroy.assert_not_called()
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_lb_create_fail_and_rollback(self, nginx):
config = copy.deepcopy(self.config)
config["RPAAS_ROLLBACK_ON_ERROR"] = "1"
manager = Manager(config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
self.LoadBalancer.create.side_effect = Exception("LB create failure")
lb = self.LoadBalancer.create.return_value
manager.new_instance("x")
lb.add_host.assert_not_called()
lb.destroy.assert_not_called()
self.Host.create.assert_not_called()
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.tasks.hc.Dumb")
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_hc_create_fail_and_rollback(self, nginx, hc):
config = copy.deepcopy(self.config)
config["RPAAS_ROLLBACK_ON_ERROR"] = "1"
manager = Manager(config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
host = self.Host.create.return_value
host.dns_name = "10.0.0.1"
dumb_hc = hc.return_value
dumb_hc.create.side_effect = Exception("HC create failure")
manager.new_instance("x")
self.LoadBalancer.create.assert_called_once()
lb.add_host.assert_not_called()
lb.destroy.assert_called_once()
host.create.assert_not_called()
lb.remove_host.assert_not_called()
dumb_hc.destroy.assert_called_once()
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.tasks.hc.Dumb")
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_nginx_wait_healthcheck_fail_and_rollback(self, nginx, hc):
config = copy.deepcopy(self.config)
config["RPAAS_ROLLBACK_ON_ERROR"] = "1"
manager = Manager(config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
host = self.Host.create.return_value
host.dns_name = "10.0.0.1"
dumb_hc = hc.return_value
nginx_manager = nginx.Nginx.return_value
nginx_manager.wait_healthcheck.side_effect = Exception("Nginx timeout")
manager.new_instance("x")
self.LoadBalancer.create.assert_called_once()
lb.add_host.assert_called_once()
dumb_hc.add_url.assert_not_called()
lb.destroy.assert_called_once()
host.destroy.assert_called_once()
lb.remove_host.assert_not_called()
dumb_hc.destroy.assert_called_once()
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_with_plan_and_flavor(self, nginx):
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
lb.dsr = False
manager.new_instance("x", plan_name="small", flavor_name="vanilla")
host = self.Host.create.return_value
config = copy.deepcopy(self.config)
config.update(self.plan["config"])
config.update(self.flavor["config"])
config["HOST_TAGS"] = "rpaas_service:test-suite-rpaas,rpaas_instance:x,consul_token:abc-123"
self.Host.create.assert_called_with("my-host-manager", "x", config)
self.LoadBalancer.create.assert_called_with("my-lb-manager", "x", config)
lb.add_host.assert_called_with(host)
self.assertEquals(manager.storage.find_task("x").count(), 0)
nginx.Nginx.assert_called_once_with(config)
nginx_manager = nginx.Nginx.return_value
nginx_manager.wait_healthcheck.assert_called_once_with(host.dns_name, timeout=600)
metadata = manager.storage.find_instance_metadata("x")
self.assertEqual({"_id": "x", "plan_name": "small",
"consul_token": "abc-123", "flavor_name": "vanilla"}, metadata)
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_with_extra_tags(self, nginx):
config = copy.deepcopy(self.config)
config["INSTANCE_EXTRA_TAGS"] = "enable_monitoring:1"
manager = Manager(config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
lb.dsr = False
manager.new_instance("x")
host = self.Host.create.return_value
config["HOST_TAGS"] = ("rpaas_service:test-suite-rpaas,rpaas_instance:x,"
"consul_token:abc-123,enable_monitoring:1")
del config["INSTANCE_EXTRA_TAGS"]
self.Host.create.assert_called_with("my-host-manager", "x", config)
self.LoadBalancer.create.assert_called_with("my-lb-manager", "x", config)
lb.add_host.assert_called_with(host)
self.assertEquals(manager.storage.find_task("x").count(), 0)
nginx.Nginx.assert_called_once_with(config)
nginx_manager = nginx.Nginx.return_value
nginx_manager.wait_healthcheck.assert_called_once_with(host.dns_name, timeout=600)
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_with_dsr_enabled(self, nginx):
config = copy.deepcopy(self.config)
manager = Manager(config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
lb = self.LoadBalancer.create.return_value
lb.dsr = True
lb.address = "172.2.3.1"
manager.new_instance("x")
config["HOST_TAGS"] = ("rpaas_service:test-suite-rpaas,rpaas_instance:x,"
"consul_token:abc-123")
self.LoadBalancer.create.assert_called_once_with("my-lb-manager", "x", config)
host = self.Host.create.return_value
config["HOST_TAGS"] = ("rpaas_service:test-suite-rpaas,rpaas_instance:x,"
"consul_token:abc-123,dsr_ip:172.2.3.1")
self.Host.create.assert_called_with("my-host-manager", "x", config)
lb.add_host.assert_called_with(host)
self.assertEquals(manager.storage.find_task("x").count(), 0)
config["HOST_TAGS"] = ("rpaas_service:test-suite-rpaas,rpaas_instance:x,"
"consul_token:abc-123")
nginx.Nginx.assert_called_once_with(config)
nginx_manager = nginx.Nginx.return_value
nginx_manager.wait_healthcheck.assert_called_once_with(host.dns_name, timeout=600)
def test_new_instance_plan_not_found(self):
manager = Manager(self.config)
with self.assertRaises(storage.PlanNotFoundError):
manager.new_instance("x", plan_name="supersmall")
def test_new_instance_flavor_not_found(self):
manager = Manager(self.config)
with self.assertRaises(storage.FlavorNotFoundError):
manager.new_instance("x", flavor_name="orange")
@mock.patch("rpaas.tasks.nginx")
def test_new_instance_over_quota(self, nginx):
manager = Manager(self.config)
for name in ["a", "b", "c", "d", "e"]:
manager.new_instance(name, "myteam")
with self.assertRaises(QuotaExceededError) as cm:
manager.new_instance("f", "myteam")
self.assertEqual(str(cm.exception), "quota execeeded 5/5 used")
manager.new_instance("f", "otherteam")
def test_new_instance_error_already_running(self):
self.storage.store_task("x")
manager = Manager(self.config)
with self.assertRaises(storage.DuplicateError):
manager.new_instance("x")
@mock.patch("rpaas.manager.LoadBalancer")
def test_new_instance_error_already_exists(self, LoadBalancer):
LoadBalancer.find.return_value = "something"
manager = Manager(self.config)
with self.assertRaises(storage.DuplicateError):
manager.new_instance("x")
LoadBalancer.find.assert_called_once_with("x")
@mock.patch("rpaas.manager.LoadBalancer")
def test_update_instance(self, LoadBalancer):
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}}
)
self.storage.db[self.storage.flavors_collection].insert(
{"_id": "orange",
"description": "nginx 1.12",
"config": {"nginx_version": "1.12"}}
)
LoadBalancer.find.return_value = "something"
self.storage.store_instance_metadata("x", plan_name=self.plan["name"], consul_token="abc-123")
manager = Manager(self.config)
manager.update_instance("x", "huge")
return_metadata = {'_id': 'x', 'plan_name': 'huge', 'consul_token': 'abc-123'}
self.assertEquals(self.storage.find_instance_metadata("x"), return_metadata)
manager.update_instance("x", None, "orange")
return_metadata = {'_id': 'x', 'plan_name': 'huge', 'flavor_name': 'orange', 'consul_token': 'abc-123'}
self.assertEquals(self.storage.find_instance_metadata("x"), return_metadata)
manager.update_instance("x", "small", "vanilla")
return_metadata = {'_id': 'x', 'plan_name': 'small', 'flavor_name': 'vanilla', 'consul_token': 'abc-123'}
self.assertEquals(self.storage.find_instance_metadata("x"), return_metadata)
@mock.patch("rpaas.manager.LoadBalancer")
def test_update_instance_invalid_plan(self, LoadBalancer):
LoadBalancer.find.return_value = "something"
self.storage.store_instance_metadata("x", plan_name=self.plan["name"], consul_token="abc-123")
manager = Manager(self.config)
with self.assertRaises(storage.PlanNotFoundError):
manager.update_instance("x", "large")
@mock.patch("rpaas.manager.LoadBalancer")
def test_update_instance_invalid_flavor(self, LoadBalancer):
LoadBalancer.find.return_value = "something"
self.storage.store_instance_metadata("x", flavor_name=self.flavor["name"], consul_token="abc-123")
manager = Manager(self.config)
with self.assertRaises(storage.FlavorNotFoundError):
manager.update_instance("x", None, "orange")
@mock.patch("rpaas.manager.LoadBalancer")
def test_update_instance_not_found(self, LoadBalancer):
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}}
)
LoadBalancer.find.return_value = None
self.storage.store_instance_metadata("x", plan_name=self.plan["name"], consul_token="abc-123")
manager = Manager(self.config)
with self.assertRaises(storage.InstanceNotFoundError):
manager.update_instance("x", "huge")
@mock.patch.object(rpaas.manager.consul_manager.ConsulManager, 'destroy_token', return_value=None)
@mock.patch.object(rpaas.tasks.consul_manager.ConsulManager, 'destroy_instance', return_value=None)
def test_remove_instance(self, destroy_instance, destroy_token):
self.storage.store_instance_metadata("x", plan_name="small", consul_token="abc-123")
self.storage.store_le_certificate("x", "foobar.com")
self.storage.store_le_certificate("x", "example.com")
self.storage.store_le_certificate("y", "test.com")
lb = self.LoadBalancer.find.return_value
host = mock.Mock()
host.dns_name = "10.0.0.1"
lb.hosts = [host]
manager = Manager(self.config)
manager.consul_manager.store_acl_network("x", "10.0.0.1/32", "192.168.1.1")
manager.remove_instance("x")
config = copy.deepcopy(self.config)
config.update(self.plan["config"])
self.LoadBalancer.find.assert_called_with("x", config)
for h in lb.hosts:
h.destroy.assert_called_once()
lb.destroy.assert_called_once()
self.assertEquals(self.storage.find_task("x").count(), 0)
self.assertIsNone(self.storage.find_instance_metadata("x"))
self.assertEquals([cert for cert in self.storage.find_le_certificates({"name": "x"})], [])
self.assertEquals([cert['name'] for cert in self.storage.find_le_certificates({"name": "y"})][0], "y")
destroy_token.assert_called_with("abc-123")
destroy_instance.assert_called_with("x")
acls = manager.consul_manager.find_acl_network("x")
self.assertEqual([], acls)
@mock.patch.object(rpaas.manager.consul_manager.ConsulManager, 'destroy_token', return_value=None)
def test_remove_instance_no_token(self, destroy_token):
self.storage.store_instance_metadata("x", plan_name="small")
lb = self.LoadBalancer.find.return_value
host = mock.Mock()
host.dns_name = "10.0.0.1"
lb.hosts = [host]
manager = Manager(self.config)
manager.remove_instance("x")
config = copy.deepcopy(self.config)
config.update(self.plan["config"])
self.LoadBalancer.find.assert_called_with("x", config)
for h in lb.hosts:
h.destroy.assert_called_once()
lb.destroy.assert_called_once()
self.assertEquals(self.storage.find_task("x").count(), 0)
self.assertIsNone(self.storage.find_instance_metadata("x"))
destroy_token.assert_not_called()
def test_remove_instance_remove_task_on_exception(self):
self.storage.store_instance_metadata("x", plan_name="small")
lb = self.LoadBalancer.find.return_value
lb.hosts = [mock.Mock(side_effect=Exception("test"))]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.remove_instance("x")
self.assertEquals(self.storage.find_task("x").count(), 0)
def test_remove_instance_on_swap_error(self):
self.storage.store_instance_metadata("x", plan_name="small")
manager = Manager(self.config)
manager.consul_manager.swap_instances("x", "y")
with self.assertRaises(InstanceAlreadySwappedError):
manager.remove_instance("x")
@mock.patch("rpaas.tasks.nginx")
def test_remove_instance_decrement_quota(self, nginx):
manager = Manager(self.config)
for name in ["a", "b", "c", "d", "e"]:
manager.new_instance(name)
with self.assertRaises(QuotaExceededError):
manager.new_instance("f")
manager.remove_instance("e")
manager.new_instance("f")
manager.remove_instance("e")
with self.assertRaises(QuotaExceededError):
manager.new_instance("g")
@mock.patch("rpaas.tasks.nginx")
def test_remove_instance_do_not_remove_similar_instance_name(self, nginx):
manager = Manager(self.config)
manager.new_instance("instance")
manager.new_instance("instance_abcdf")
        manager.consul_manager.write_healthcheck("instance_abcdf")
manager.remove_instance("instance")
instance2_healthcheck = manager.consul_manager.client.kv.get("test-suite-rpaas/instance_abcdf/healthcheck")[1]
self.assertEqual(instance2_healthcheck['Value'], "true")
@mock.patch("rpaas.manager.LoadBalancer")
def test_restore_machine_instance(self, LoadBalancer):
manager = Manager(self.config)
lb = LoadBalancer.find.return_value
        lb.address = "10.1.1.1"
self.storage.store_instance_metadata("foo", consul_token="abc")
self.storage.db[self.storage.hosts_collection].insert({"_id": 0, "dns_name": "10.1.1.1",
"manager": "fake", "group": "foo",
"alternative_id": 0})
manager.restore_machine_instance('foo', '10.1.1.1')
task = self.storage.find_task("restore_10.1.1.1")
self.assertEqual(task[0]['host'], "10.1.1.1")
@mock.patch("rpaas.manager.LoadBalancer")
def test_restore_machine_invalid_dns_name(self, LoadBalancer):
manager = Manager(self.config)
lb = LoadBalancer.find.return_value
        lb.address = "10.2.2.2"
self.storage.store_instance_metadata("foo", consul_token="abc")
with self.assertRaises(rpaas.manager.InstanceMachineNotFoundError):
manager.restore_machine_instance('foo', '10.1.1.1')
def test_restore_machine_instance_cancel(self):
manager = Manager(self.config)
self.storage.store_task("restore_10.1.1.1")
manager.restore_machine_instance('foo', '10.1.1.1', True)
task = self.storage.find_task("restore_10.1.1.1")
self.assertEquals(task.count(), 0)
def test_restore_machine_instance_cancel_invalid_task(self):
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.TaskNotFoundError):
manager.restore_machine_instance('foo', '10.1.1.1', True)
@mock.patch("rpaas.manager.nginx")
@mock.patch("rpaas.manager.LoadBalancer")
def test_restore_instance_successfully(self, LoadBalancer, nginx):
self.config["CLOUDSTACK_TEMPLATE_ID"] = "default_template"
self.config["INSTANCE_EXTRA_TAGS"] = "x:y"
self.config["RPAAS_RESTORE_DELAY"] = 1
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"CLOUDSTACK_TEMPLATE_ID": "1234", "INSTANCE_EXTRA_TAGS": "a:b,c:d"}}
)
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = '10.1.1.1'
lb.hosts[0].id = 'xxx'
lb.hosts[1].dns_name = '10.2.2.2'
lb.hosts[1].id = 'yyy'
self.storage.store_instance_metadata("x", plan_name="huge", consul_token="abc-123")
manager = Manager(self.config)
responses = [response for response in manager.restore_instance("x")]
lb.hosts[0].stop.assert_called_once()
lb.hosts[0].scale.assert_called_once()
lb.hosts[0].restore.assert_called_once()
lb.hosts[1].scale.assert_called_once()
lb.hosts[1].stop.assert_called_once()
lb.hosts[1].restore.assert_called_once()
while "." in responses:
responses.remove(".")
expected_responses = ["Restoring host (1/2) xxx ", ": successfully restored\n",
"Restoring host (2/2) yyy ", ": successfully restored\n"]
self.assertListEqual(responses, expected_responses)
self.assertDictContainsSubset(LoadBalancer.find.call_args[1],
{'CLOUDSTACK_TEMPLATE_ID': u'1234', 'HOST_TAGS': u'a:b,c:d'})
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.manager.nginx")
@mock.patch("rpaas.manager.LoadBalancer")
def test_restore_instance_failed_restore(self, LoadBalancer, nginx):
self.config["CLOUDSTACK_TEMPLATE_ID"] = "default_template"
self.config["INSTANCE_EXTRA_TAGS"] = "x:y"
self.config["RPAAS_RESTORE_DELAY"] = 1
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"CLOUDSTACK_TEMPLATE_ID": "1234", "INSTANCE_EXTRA_TAGS": "a:b,c:d"}}
)
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = '10.1.1.1'
lb.hosts[0].id = 'xxx'
lb.hosts[1].dns_name = '10.2.2.2'
lb.hosts[1].id = 'yyy'
self.storage.store_instance_metadata("x", plan_name="huge", consul_token="abc-123")
manager = Manager(self.config)
nginx_manager = nginx.Nginx.return_value
nginx_manager.wait_healthcheck.side_effect = ["OK", Exception("timeout to response")]
responses = [response for response in manager.restore_instance("x")]
while "." in responses:
responses.remove(".")
nginx_manager.wait_healthcheck.assert_called_with(host='10.2.2.2', timeout=600,
manage_healthcheck=False)
expected_responses = ["Restoring host (1/2) xxx ", ": successfully restored\n",
"Restoring host (2/2) yyy ", ": failed to restore - 'timeout to response'\n"]
self.assertListEqual(responses, expected_responses)
self.assertDictContainsSubset(LoadBalancer.find.call_args[1],
{'CLOUDSTACK_TEMPLATE_ID': u'1234', 'HOST_TAGS': u'a:b,c:d'})
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.manager.nginx")
@mock.patch("rpaas.manager.LoadBalancer")
def test_restore_instance_failed_restore_change_plan(self, LoadBalancer, nginx):
self.config["CLOUDSTACK_TEMPLATE_ID"] = "default_template"
self.config["INSTANCE_EXTRA_TAGS"] = "x:y"
self.config["RPAAS_RESTORE_DELAY"] = 1
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"CLOUDSTACK_TEMPLATE_ID": "1234", "INSTANCE_EXTRA_TAGS": "a:b,c:d"}}
)
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = '10.1.1.1'
lb.hosts[0].id = 'xxx'
lb.hosts[1].dns_name = '10.2.2.2'
lb.hosts[1].id = 'yyy'
lb.hosts[1].scale.side_effect = Exception("failed to resize instance")
self.storage.store_instance_metadata("x", plan_name="huge", consul_token="abc-123")
manager = Manager(self.config)
responses = [response for response in manager.restore_instance("x")]
while "." in responses:
responses.remove(".")
expected_responses = ["Restoring host (1/2) xxx ", ": successfully restored\n",
"Restoring host (2/2) yyy ", ": failed to restore - 'failed to resize instance'\n"]
self.assertListEqual(responses, expected_responses)
self.assertDictContainsSubset(LoadBalancer.find.call_args[1],
{'CLOUDSTACK_TEMPLATE_ID': u'1234', 'HOST_TAGS': u'a:b,c:d'})
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.manager.nginx")
@mock.patch("rpaas.manager.LoadBalancer")
def test_restore_instance_service_instance_not_found(self, LoadBalancer, nginx):
self.config["CLOUDSTACK_TEMPLATE_ID"] = "default_template"
self.config["INSTANCE_EXTRA_TAGS"] = "x:y"
self.config["RPAAS_RESTORE_DELAY"] = 1
LoadBalancer.find.return_value = None
manager = Manager(self.config)
responses = [host for host in manager.restore_instance("x")]
self.assertListEqual(responses, ["instance x not found\n"])
self.assertEqual(self.storage.find_task("x").count(), 0)
@mock.patch("rpaas.manager.LoadBalancer")
def test_node_status(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = '10.1.1.1'
lb.hosts[1].dns_name = '10.2.2.2'
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.node_hostname.side_effect = ['vm-1', 'vm-2']
manager.consul_manager.node_status.return_value = {'vm-1': 'OK', 'vm-2': 'DEAD'}
node_status = manager.node_status("x")
LoadBalancer.find.assert_called_with("x")
self.assertDictEqual(node_status, {'vm-1': {'status': 'OK', 'address': '10.1.1.1'},
'vm-2': {'status': 'DEAD', 'address': '10.2.2.2'}})
@mock.patch("rpaas.manager.LoadBalancer")
def test_node_status_no_hostname(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = '10.1.1.1'
lb.hosts[1].dns_name = '10.2.2.2'
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.node_hostname.side_effect = ['vm-1', None]
manager.consul_manager.node_status.return_value = {'vm-1': 'OK', 'vm-2': 'DEAD'}
node_status = manager.node_status("x")
LoadBalancer.find.assert_called_with("x")
self.assertDictEqual(node_status, {'vm-1': {'status': 'OK', 'address': '10.1.1.1'},
'vm-2': {'status': 'DEAD'}})
@mock.patch("rpaas.manager.LoadBalancer")
def test_info(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.address = "192.168.1.1"
manager = Manager(self.config)
info = manager.info("x")
LoadBalancer.find.assert_called_with("x")
self.assertItemsEqual(info, [
{"label": "Address", "value": "192.168.1.1"},
{"label": "Instances", "value": "0"},
{"label": "Routes", "value": ""},
])
self.assertEqual(manager.status("x"), "192.168.1.1")
@mock.patch("rpaas.manager.LoadBalancer")
def test_info_with_plan(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.address = "192.168.1.1"
self.storage.store_instance_metadata("x", plan_name="small")
self.addCleanup(self.storage.remove_instance_metadata, "x")
manager = Manager(self.config)
info = manager.info("x")
LoadBalancer.find.assert_called_with("x")
self.assertItemsEqual(info, [
{"label": "Address", "value": "192.168.1.1"},
{"label": "Instances", "value": "0"},
{"label": "Routes", "value": ""},
{"label": "Plan", "value": "small"},
])
self.assertEqual(manager.status("x"), "192.168.1.1")
@mock.patch("rpaas.manager.LoadBalancer")
def test_info_with_binding(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
self.storage.replace_binding_path("inst", "/arrakis", None, "location /x {\nmy content ☺\n}")
self.storage.replace_binding_path("inst", "/https_only", "app1.host.com", https_only=True)
lb = LoadBalancer.find.return_value
lb.address = "192.168.1.1"
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
info = manager.info("inst")
LoadBalancer.find.assert_called_with("inst")
self.assertItemsEqual(info, [
{"label": "Address", "value": "192.168.1.1"},
{"label": "Instances", "value": "2"},
{"label": "Routes", "value": """path = /
destination = app.host.com
path = /arrakis
content = location /x {
my content ☺
}
path = /https_only
destination = app1.host.com (https only)"""},
])
self.assertEqual(manager.status("inst"), "192.168.1.1")
@mock.patch("rpaas.manager.tasks")
def test_info_status_pending(self, tasks):
self.storage.store_task("x")
self.storage.update_task("x", "something-id")
async_init = tasks.NewInstanceTask.return_value.AsyncResult
async_init.return_value.status = "PENDING"
manager = Manager(self.config)
info = manager.info("x")
self.assertItemsEqual(info, [
{"label": "Address", "value": "pending"},
{"label": "Instances", "value": "0"},
{"label": "Routes", "value": ""},
])
async_init.assert_called_with("something-id")
self.assertEqual(manager.status("x"), "pending")
@mock.patch("rpaas.manager.tasks")
def test_info_status_failure(self, tasks):
self.storage.store_task("x")
self.storage.update_task("x", "something-id")
async_init = tasks.NewInstanceTask.return_value.AsyncResult
async_init.return_value.status = "FAILURE"
manager = Manager(self.config)
info = manager.info("x")
self.assertItemsEqual(info, [
{"label": "Address", "value": "failure"},
{"label": "Instances", "value": "0"},
{"label": "Routes", "value": ""},
])
async_init.assert_called_with("something-id")
self.assertEqual(manager.status("x"), "failure")
@mock.patch("rpaas.tasks.nginx")
def test_scale_instance_up(self, nginx):
lb = self.LoadBalancer.find.return_value
lb.name = "x"
lb.hosts = [mock.Mock(), mock.Mock()]
delattr(lb, "dsr")
self.storage.store_instance_metadata("x", consul_token="abc-123")
self.addCleanup(self.storage.remove_instance_metadata, "x")
config = copy.deepcopy(self.config)
config["HOST_TAGS"] = "rpaas_service:test-suite-rpaas,rpaas_instance:x,consul_token:abc-123"
manager = Manager(self.config)
manager.consul_manager.store_acl_network("x", "10.0.0.4/32", "192.168.0.0/24")
hosts = [mock.Mock(), mock.Mock(), mock.Mock()]
for idx, host in enumerate(hosts):
host.dns_name = "10.0.0.{}".format(idx + 1)
self.Host.create.side_effect = hosts
manager.scale_instance("x", 5)
self.Host.create.assert_called_with("my-host-manager", "x", config)
self.assertEqual(self.Host.create.call_count, 3)
lb.add_host.assert_has_calls([mock.call(host) for host in hosts])
self.assertEqual(lb.add_host.call_count, 3)
nginx_manager = nginx.Nginx.return_value
expected_calls = [mock.call("10.0.0.1", timeout=600),
mock.call("10.0.0.2", timeout=600),
mock.call("10.0.0.3", timeout=600)]
self.assertEqual(expected_calls, nginx_manager.wait_healthcheck.call_args_list)
acls = manager.consul_manager.find_acl_network("x")
expected_acls = [{'destination': ['192.168.0.0/24'], 'source': '10.0.0.1/32'},
{'destination': ['192.168.0.0/24'], 'source': '10.0.0.2/32'},
{'destination': ['192.168.0.0/24'], 'source': '10.0.0.3/32'},
{'destination': ['192.168.0.0/24'], 'source': '10.0.0.4/32'}]
self.assertEqual(expected_acls, acls)
@mock.patch("rpaas.tasks.nginx")
def test_scale_instance_up_no_token(self, nginx):
lb = self.LoadBalancer.find.return_value
lb.dsr = False
lb.name = "x"
lb.hosts = [mock.Mock(), mock.Mock()]
config = copy.deepcopy(self.config)
config["HOST_TAGS"] = "rpaas_service:test-suite-rpaas,rpaas_instance:x,consul_token:abc-123"
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
manager.scale_instance("x", 5)
self.Host.create.assert_called_with("my-host-manager", "x", config)
self.assertEqual(self.Host.create.call_count, 3)
lb.add_host.assert_called_with(self.Host.create.return_value)
self.assertEqual(lb.add_host.call_count, 3)
nginx_manager = nginx.Nginx.return_value
created_host = self.Host.create.return_value
expected_calls = [mock.call(created_host.dns_name, timeout=600),
mock.call(created_host.dns_name, timeout=600),
mock.call(created_host.dns_name, timeout=600)]
self.assertEqual(expected_calls, nginx_manager.wait_healthcheck.call_args_list)
@mock.patch("rpaas.tasks.nginx")
def test_scale_instance_up_with_plan_and_flavor(self, nginx):
lb = self.LoadBalancer.find.return_value
lb.dsr = False
lb.name = "x"
lb.hosts = [mock.Mock(), mock.Mock()]
self.storage.store_instance_metadata("x", plan_name=self.plan["name"],
consul_token="abc-123", flavor_name=self.flavor["name"])
self.addCleanup(self.storage.remove_instance_metadata, "x")
config = copy.deepcopy(self.config)
config.update(self.plan["config"])
config.update(self.flavor["config"])
config["HOST_TAGS"] = "rpaas_service:test-suite-rpaas,rpaas_instance:x,consul_token:abc-123"
manager = Manager(self.config)
manager.scale_instance("x", 5)
self.Host.create.assert_called_with("my-host-manager", "x", config)
self.assertEqual(self.Host.create.call_count, 3)
lb.add_host.assert_called_with(self.Host.create.return_value)
self.assertEqual(lb.add_host.call_count, 3)
nginx_manager = nginx.Nginx.return_value
created_host = self.Host.create.return_value
expected_calls = [mock.call(created_host.dns_name, timeout=600),
mock.call(created_host.dns_name, timeout=600),
mock.call(created_host.dns_name, timeout=600)]
self.assertEqual(expected_calls, nginx_manager.wait_healthcheck.call_args_list)
def test_scale_instance_error_task_running(self):
self.storage.store_task("x")
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.NotReadyError):
manager.scale_instance("x", 5)
def test_scale_instance_down(self):
lb = self.LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
self.storage.store_instance_metadata("x", consul_token="abc-123")
self.addCleanup(self.storage.remove_instance_metadata, "x")
manager = Manager(self.config)
manager.scale_instance("x", 1)
lb.hosts[0].destroy.assert_called_once()
lb.remove_host.assert_called_once_with(lb.hosts[0])
@mock.patch("rpaas.tasks.consul_manager")
def test_scale_instance_down_with_healing_enabled(self, consul_manager):
consul = consul_manager.ConsulManager.return_value
config = copy.deepcopy(self.config)
lb = self.LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = '10.2.2.2'
lb.hosts[0].id = '1234'
self.storage.store_instance_metadata("x", consul_token="abc-123")
self.addCleanup(self.storage.remove_instance_metadata, "x")
consul.node_hostname.return_value = 'rpaas-2'
manager = Manager(config)
manager.consul_manager = mock.Mock()
manager.consul_manager.generate_token.return_value = "abc-123"
manager.scale_instance("x", 1)
lb.hosts[0].destroy.assert_called_once()
lb.remove_host.assert_called_once_with(lb.hosts[0])
consul.remove_node.assert_called_once_with('x', 'rpaas-2', '1234')
def test_scale_instance_error(self):
lb = self.LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
with self.assertRaises(ScaleError):
manager.scale_instance("x", -1)
@mock.patch("rpaas.manager.LoadBalancer")
def test_bind_instance(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = "h1"
lb.hosts[1].dns_name = "h2"
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.bind("x", "apphost.com")
binding_data = self.storage.find_binding("x")
self.assertDictEqual(binding_data, {
"_id": "x",
"app_host": "apphost.com",
"paths": [{"path": "/", "destination": "apphost.com"}]
})
LoadBalancer.find.assert_called_with("x")
manager.consul_manager.write_location.assert_called_with("x", "/", destination="apphost.com",
router_mode=False, bind_mode=True, https_only=False)
def test_bind_instance_error_task_running(self):
self.storage.store_task("x")
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.NotReadyError):
manager.bind("x", "apphost.com")
@mock.patch("rpaas.manager.LoadBalancer")
def test_bind_instance_multiple_bind_hosts(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.bind("x", "apphost.com")
binding_data = self.storage.find_binding("x")
self.assertDictEqual(binding_data, {
"_id": "x",
"app_host": "apphost.com",
"paths": [{"path": "/", "destination": "apphost.com"}]
})
LoadBalancer.find.assert_called_with("x")
manager.consul_manager.write_location.assert_called_with("x", "/", destination="apphost.com",
router_mode=False, bind_mode=True, https_only=False)
manager.consul_manager.reset_mock()
manager.bind("x", "apphost.com")
self.assertEqual(0, len(manager.consul_manager.mock_calls))
with self.assertRaises(rpaas.manager.BindError):
manager.bind("x", "another.host.com")
self.assertEqual(0, len(manager.consul_manager.mock_calls))
@mock.patch("rpaas.manager.LoadBalancer")
def test_bind_instance_with_route(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = "h1"
lb.hosts[1].dns_name = "h2"
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_route("x", "/somewhere", "my.other.host", None, False)
manager.add_route("x", "/", "my.other.host2", None, False)
manager.bind("x", "apphost.com")
binding_data = self.storage.find_binding("x")
self.assertDictEqual(binding_data, {
"_id": "x",
"app_host": "apphost.com",
"paths": [
{"path": "/somewhere", "destination": "my.other.host", "content": None, "https_only": False},
{"path": "/", "destination": "my.other.host2", "content": None, "https_only": False, }
]
})
LoadBalancer.find.assert_called_with("x")
expected_calls = [mock.call("x", "/somewhere", destination="my.other.host", content=None, https_only=False),
mock.call("x", "/", destination="my.other.host2", content=None, https_only=False)]
manager.consul_manager.write_location.assert_has_calls(expected_calls)
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_route_bind_and_unbind(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = "h1"
lb.hosts[1].dns_name = "h2"
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_route("x", "/", "my.other.host", None, False)
manager.bind("x", "apphost.com")
manager.unbind("x")
binding_data = self.storage.find_binding("x")
self.assertDictEqual(binding_data, {
"_id": "x",
"paths": [
{"path": "/", "destination": "my.other.host", "content": None, "https_only": False, }
]
})
LoadBalancer.find.assert_called_with("x")
expected_calls = [mock.call("x", "/", destination="my.other.host", content=None, https_only=False)]
manager.consul_manager.write_location.assert_has_calls(expected_calls)
@mock.patch("rpaas.manager.LoadBalancer")
def test_bind_on_x_add_route_unbind_and_bind_on_otherhost(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
lb.hosts[0].dns_name = "h1"
lb.hosts[1].dns_name = "h2"
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.bind("x", "apphost.com")
manager.add_route("x", "/", "my.custom.host", None, False)
manager.unbind("x")
manager.bind("x", "otherhost.com")
binding_data = self.storage.find_binding("x")
self.assertDictEqual(binding_data, {
"_id": "x",
"app_host": "otherhost.com",
"paths": [
{"path": "/", "destination": "my.custom.host", "content": None, "https_only": False}
]
})
LoadBalancer.find.assert_called_with("x")
expected_calls = [mock.call("x", "/", destination="apphost.com", router_mode=False, bind_mode=True,
https_only=False),
mock.call("x", "/", destination="my.custom.host", content=None, https_only=False)]
manager.consul_manager.write_location.assert_has_calls(expected_calls)
@mock.patch("rpaas.manager.LoadBalancer")
def test_unbind_instance(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.unbind("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"paths": []
})
LoadBalancer.find.assert_called_with("inst")
content_instance_not_bound = nginx.NGINX_LOCATION_INSTANCE_NOT_BOUND
manager.consul_manager.write_location.assert_called_with("inst", "/", content=content_instance_not_bound)
manager.consul_manager.remove_server_upstream.assert_called_once_with("inst", "rpaas_default_upstream",
"app.host.com")
@mock.patch("rpaas.manager.LoadBalancer")
def test_unbind_instance_fail_for_custom_route_added(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.add_route("inst", "/", "my.other.host", None, False)
manager.consul_manager = mock.Mock()
manager.unbind("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"paths": [
{"path": "/", "destination": "my.other.host", "content": None, "https_only": False}
]
})
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.write_location.assert_not_called()
manager.consul_manager.remove_server_upstream.assert_not_called()
@mock.patch("rpaas.manager.LoadBalancer")
def test_unbind_instance_with_extra_path(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
self.storage.replace_binding_path("inst", "/me", "somewhere.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.unbind("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"paths": [
{"path": "/me", "destination": "somewhere.com", "content": None, "https_only": False}
]
})
LoadBalancer.find.assert_called_with("inst")
content_instance_not_bound = nginx.NGINX_LOCATION_INSTANCE_NOT_BOUND
manager.consul_manager.write_location.assert_called_with("inst", "/", content=content_instance_not_bound)
manager.consul_manager.remove_server_upstream.assert_called_once_with("inst", "rpaas_default_upstream",
"app.host.com")
@mock.patch("rpaas.manager.LoadBalancer")
def test_unbind_and_bind_instance_with_extra_path(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
self.storage.replace_binding_path("inst", "/me", "somewhere.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.unbind("inst")
manager.bind("inst", "app2.host.com")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app2.host.com",
"paths": [
{"path": "/me", "destination": "somewhere.com", "content": None, "https_only": False},
{"path": "/", "destination": "app2.host.com"}
]
})
LoadBalancer.find.assert_called_with("inst")
content_instance_not_bound = nginx.NGINX_LOCATION_INSTANCE_NOT_BOUND
expected_calls = [mock.call("inst", "/", content=content_instance_not_bound),
mock.call("inst", "/", destination="app2.host.com", router_mode=False, bind_mode=True,
https_only=False)]
manager.consul_manager.write_location.assert_has_calls(expected_calls)
manager.consul_manager.remove_server_upstream.assert_called_once_with("inst", "rpaas_default_upstream",
"app.host.com")
@mock.patch("rpaas.manager.LoadBalancer")
def test_update_certificate(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.update_certificate("inst", "cert", "key")
LoadBalancer.find.assert_called_with("inst")
cert, key = manager.consul_manager.get_certificate("inst")
self.assertEqual(cert, "cert")
self.assertEqual(key, "key")
@mock.patch("rpaas.manager.LoadBalancer")
def test_update_certificate_instance_not_found_error(self, LoadBalancer):
LoadBalancer.find.return_value = None
manager = Manager(self.config)
with self.assertRaises(storage.InstanceNotFoundError):
manager.update_certificate("inst", "cert", "key")
LoadBalancer.find.assert_called_with("inst")
def test_update_certificate_error_task_running(self):
self.storage.store_task("inst")
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.NotReadyError):
manager.update_certificate("inst", "cert", "key")
@mock.patch("rpaas.manager.LoadBalancer")
def test_get_certificate_success(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager.set_certificate("inst", "cert", "key")
cert, key = manager.get_certificate("inst")
self.assertEqual(cert, "cert")
self.assertEqual(key, "key")
@mock.patch("rpaas.manager.LoadBalancer")
def test_get_certificate_instance_not_found_error(self, LoadBalancer):
LoadBalancer.find.return_value = None
manager = Manager(self.config)
with self.assertRaises(storage.InstanceNotFoundError):
cert, key = manager.get_certificate("inst")
LoadBalancer.find.assert_called_with("inst")
def test_get_certificate_error_task_running(self):
self.storage.store_task("inst")
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.NotReadyError):
cert, key = manager.get_certificate("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_get_certificate_not_found_error(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
with self.assertRaises(CertificateNotFoundError):
cert, key = manager.get_certificate("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_certificate_success(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager.set_certificate("inst", "cert", "key")
manager.delete_certificate("inst")
with self.assertRaises(CertificateNotFoundError):
cert, key = manager.consul_manager.get_certificate("inst")
def test_delete_certificate_error_task_running(self):
self.storage.store_task("inst")
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.NotReadyError):
manager.delete_certificate("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_certificate_instance_not_found_error(self, LoadBalancer):
LoadBalancer.find.return_value = None
manager = Manager(self.config)
with self.assertRaises(storage.InstanceNotFoundError):
manager.delete_certificate("inst")
LoadBalancer.find.assert_called_with("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_route(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_route("inst", "/somewhere", "my.other.host", None, False)
manager.add_route("inst", "/https_only", "my.other.host2", None, True)
LoadBalancer.find.assert_called_with("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [
{
"path": "/",
"destination": "app.host.com"
},
{
"path": "/somewhere",
"destination": "my.other.host",
"content": None,
"https_only": False
},
{
"path": "/https_only",
"destination": "my.other.host2",
"content": None,
"https_only": True
}
]
})
expected_calls = [mock.call("inst", "/somewhere", destination="my.other.host", content=None, https_only=False),
mock.call('inst', "/https_only", content=None, destination="my.other.host2", https_only=True)]
self.assertEqual(manager.consul_manager.write_location.call_args_list, expected_calls)
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_route_with_content(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_route("inst", "/somewhere", None, "location /x { something; }", False)
LoadBalancer.find.assert_called_with("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [
{
"path": "/",
"destination": "app.host.com",
},
{
"path": "/somewhere",
"destination": None,
"content": "location /x { something; }",
"https_only": False
}
]
})
manager.consul_manager.write_location.assert_called_with("inst", "/somewhere", destination=None,
content="location /x { something; }", https_only=False)
def test_add_route_error_task_running(self):
self.storage.store_task("inst")
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.NotReadyError):
manager.add_route("inst", "/somewhere", "my.other.host", None, False)
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_route_no_binding_creates_one(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_route("inst", "/somewhere", "my.other.host", None, False)
LoadBalancer.find.assert_called_with("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"paths": [
{
"path": "/somewhere",
"destination": "my.other.host",
"content": None,
"https_only": False
}
]
})
manager.consul_manager.write_location.assert_called_with("inst", "/somewhere",
destination="my.other.host",
content=None, https_only=False)
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_route_with_destination(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
self.storage.replace_binding_path("inst", "/arrakis", "dune.com")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [{"path": "/", "destination": "app.host.com"},
{"path": "/arrakis", "destination": "dune.com", "content": None, "https_only": False}]
})
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.delete_route("inst", "/arrakis")
LoadBalancer.find.assert_called_with("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [{"path": "/", "destination": "app.host.com"}]
})
manager.consul_manager.remove_server_upstream.assert_called_once_with("inst", "dune.com", "dune.com")
manager.consul_manager.remove_location.assert_called_with("inst", "/arrakis")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_route_with_custom_content(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
self.storage.replace_binding_path("inst", "/arrakis", None, "something")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [{"path": "/", "destination": "app.host.com"},
{"path": "/arrakis", "destination": None, "content": "something", "https_only": False}]
})
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.delete_route("inst", "/arrakis")
LoadBalancer.find.assert_called_with("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [{"path": "/", "destination": "app.host.com"}]
})
manager.consul_manager.remove_server_upstream.assert_not_called()
manager.consul_manager.remove_location.assert_called_with("inst", "/arrakis")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_route_also_point_to_root(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
self.storage.replace_binding_path("inst", "/arrakis", "app.host.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.delete_route("inst", "/arrakis")
LoadBalancer.find.assert_called_with("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [{"path": "/", "destination": "app.host.com"}]
})
manager.consul_manager.remove_server_upstream.assert_not_called()
manager.consul_manager.remove_location.assert_called_with("inst", "/arrakis")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_route_only_remove_upstream_on_last_reference(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
self.storage.replace_binding_path("inst", "/arrakis", "dune.com")
self.storage.replace_binding_path("inst", "/atreides", "dune.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.delete_route("inst", "/arrakis")
LoadBalancer.find.assert_called_with("inst")
binding_data = self.storage.find_binding("inst")
self.assertDictEqual(binding_data, {
"_id": "inst",
"app_host": "app.host.com",
"paths": [{"path": "/", "destination": "app.host.com"},
{"path": "/atreides", "destination": "dune.com", "content": None, "https_only": False}]
})
manager.consul_manager.remove_server_upstream.assert_not_called()
manager.consul_manager.remove_location.assert_called_with("inst", "/arrakis")
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_upstream_multiple_hosts(self, LoadBalancer):
lb = LoadBalancer.find.return_value
host1 = mock.Mock()
host1.dns_name = '10.0.0.1'
host2 = mock.Mock()
host2.dns_name = '10.0.0.2'
lb.hosts = [host1, host2]
manager = Manager(self.config)
manager.add_upstream("inst", "my_upstream", ['192.168.0.1', '192.168.0.2'], True)
acls = manager.consul_manager.find_acl_network("inst")
expected_acls = [{'destination': ['192.168.0.2', '192.168.0.1'],
'source': '10.0.0.1/32'},
{'destination': ['192.168.0.2', '192.168.0.1'],
'source': '10.0.0.2/32'}]
self.assertEqual(acls, expected_acls)
servers = manager.consul_manager.list_upstream("inst", "my_upstream")
self.assertEqual(servers, set(['192.168.0.2', '192.168.0.1']))
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_upstream_single_host(self, LoadBalancer):
lb = LoadBalancer.find.return_value
host1 = mock.Mock()
host1.dns_name = '10.0.0.1'
host2 = mock.Mock()
host2.dns_name = '10.0.0.2'
lb.hosts = [host1, host2]
manager = Manager(self.config)
manager.add_upstream("inst", "my_upstream", '192.168.0.1', True)
acls = manager.consul_manager.find_acl_network("inst")
expected_acls = [{'destination': ['192.168.0.1'],
'source': '10.0.0.1/32'},
{'destination': ['192.168.0.1'],
'source': '10.0.0.2/32'}]
self.assertEqual(acls, expected_acls)
servers = manager.consul_manager.list_upstream("inst", "my_upstream")
self.assertEqual(servers, set(['192.168.0.1']))
@mock.patch("rpaas.acl.AclManager")
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_upstream_using_acl_manager(self, LoadBalancer, AclManager):
lb = LoadBalancer.find.return_value
host1 = mock.Mock()
host1.dns_name = '10.0.0.1'
lb.hosts = [host1]
os.environ['CHECK_ACL_API'] = "1"
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_upstream("inst", "my_upstream", '192.168.0.1', True)
manager.acl_manager.add_acl.assert_called_once_with('inst', '10.0.0.1', '192.168.0.1')
manager.consul_manager.add_server_upstream.assert_called_once_with('inst', 'my_upstream', ['192.168.0.1'])
def test_delete_route_error_task_running(self):
self.storage.store_task("inst")
manager = Manager(self.config)
with self.assertRaises(rpaas.tasks.NotReadyError):
manager.delete_route("inst", "/arrakis")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_route_error_no_route(self, LoadBalancer):
self.storage.store_binding("inst", "app.host.com")
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
with self.assertRaises(storage.InstanceNotFoundError):
manager.delete_route("inst", "/somewhere")
LoadBalancer.find.assert_called_with("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_route_no_binding(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
with self.assertRaises(storage.InstanceNotFoundError):
manager.delete_route("inst", "/zahadum")
LoadBalancer.find.assert_called_with("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_block_with_content(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_block("inst", "server", "location /x { something; }")
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.write_block.assert_called_with(
"inst", "server", "location /x { something; }"
)
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_block(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.delete_block("inst", "http")
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.remove_block.assert_called_with("inst", "http")
@mock.patch("rpaas.manager.LoadBalancer")
def test_list_block(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.list_blocks.return_value = [
{u'block_name': 'server',
u'content': 'something nice in server'},
{u'block_name': 'http',
u'content': 'something nice in http'}
]
blocks = manager.list_blocks("inst")
self.assertDictEqual(blocks[0], {'block_name': 'server',
'content': 'something nice in server'})
self.assertDictEqual(blocks[1], {'block_name': 'http',
'content': 'something nice in http'})
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.list_blocks.assert_called_with("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_empty_list_blocks(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.list_blocks.return_value = []
blocks = manager.list_blocks("inst")
self.assertEqual(blocks, [])
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.list_blocks.assert_called_with("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_purge_location(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.nginx_manager = mock.Mock()
manager.nginx_manager.purge_location.side_effect = [True, True]
purged_hosts = manager.purge_location("inst", "/foo/bar", True)
LoadBalancer.find.assert_called_with("inst")
self.assertEqual(purged_hosts, 2)
manager.nginx_manager.purge_location.assert_any_call(lb.hosts[0].dns_name, "/foo/bar", True)
manager.nginx_manager.purge_location.assert_any_call(lb.hosts[1].dns_name, "/foo/bar", True)
@mock.patch("rpaas.manager.LoadBalancer")
def test_add_lua_with_content(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.add_lua("inst", "my_module", "server", "lua code")
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.write_lua.assert_called_with(
"inst", "my_module", "server", "lua code"
)
@mock.patch("rpaas.manager.LoadBalancer")
def test_list_lua_modules(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.consul_manager.list_lua_modules.return_value = {"somelua": {"server": "lua code"}}
modules = manager.list_lua("inst")
self.assertDictEqual(modules, {"somelua": {"server": "lua code"}})
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.list_lua_modules.assert_called_with("inst")
@mock.patch("rpaas.manager.LoadBalancer")
def test_delete_lua(self, LoadBalancer):
lb = LoadBalancer.find.return_value
lb.hosts = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.delete_lua("inst", "server", "module")
LoadBalancer.find.assert_called_with("inst")
manager.consul_manager.remove_lua.assert_called_with("inst", "server", "module")
@mock.patch("rpaas.manager.LoadBalancer")
def test_swap_success(self, LoadBalancer):
LoadBalancer.find.side_effect = [mock.Mock(), mock.Mock()]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
manager.swap("x", "y")
manager.consul_manager.swap_instances.assert_called_with("x", "y")
@mock.patch("rpaas.manager.LoadBalancer")
def test_swap_instance_not_found(self, LoadBalancer):
LoadBalancer.find.side_effect = [mock.Mock(), None]
manager = Manager(self.config)
manager.consul_manager = mock.Mock()
with self.assertRaises(storage.InstanceNotFoundError):
manager.swap("x", "y")
| 47.31848
| 120
| 0.631038
|
c2d9811be4faf1c557a27aa8f446be94fefc4a59
| 11,891
|
py
|
Python
|
cosmoslib/magnetic.py
|
guanyilun/cosmo-codes
|
a1fb44a1b61211a237080949ce4bfa9e7604083f
|
[
"MIT"
] | 1
|
2019-08-30T04:07:29.000Z
|
2019-08-30T04:07:29.000Z
|
cosmoslib/magnetic.py
|
guanyilun/cosmoslib
|
a1fb44a1b61211a237080949ce4bfa9e7604083f
|
[
"MIT"
] | null | null | null |
cosmoslib/magnetic.py
|
guanyilun/cosmoslib
|
a1fb44a1b61211a237080949ce4bfa9e7604083f
|
[
"MIT"
] | null | null | null |
"""Calculation related to magnetic fields
"""
import numpy as np
import scipy.special as sp
from scipy.special import spherical_jn
from scipy.interpolate import CubicSpline
from scipy.integrate import quad, romberg
from tqdm import tqdm
from cosmoslib.units import natural as u
from cosmoslib.utils.glquad import gauss_legendre_quadrature
from cosmoslib.utils.sphbessel import jl
from cosmoslib.utils import integrate
###############################
# transfer function for cl_aa #
###############################
class MagneticField:
def __init__(self, B_lambda, n_B, lam=1, h=0.673):
"""Calculate various magnetic field related quantities here
Parameters
----------
B_lambda: amplitude of the PMF after smoothing, in nG
n_B: spectral index of the PMF power spectrum
lam: smoothing scale (in Mpc) used to define B_lambda
h: reduced Hubble parameter in units of 100 km/s/Mpc
"""
self.B_lambda = B_lambda
self.n_B = n_B
self.lam = lam
self.h = h
# compute amplitude
# Eq. (100) from arxiv 0911.2714. With an additional factor of 0.5 to match
# the convention used in other papers such as Kosowsky (2005) and Pogosian (2013)
# k_lambda = 2*np.pi / lam
# self.A = (2*np.pi)**(n_B+5)*(B_lambda*u.nG)**2 / (2*sp.gamma((n_B+3)/2)*k_lambda**(n_B+3))
# same expression but simplified
# self.A = (2*np.pi)**2*(B_lambda*u.nG)**2 / (2*sp.gamma((n_B+3)/2)) * lam**(n_B+3)
self.A = (2*np.pi)**2*(B_lambda*u.nG)**2 / (2*sp.gamma((n_B+3)/2)) * lam**(n_B+3)
def delta_m2(self, k, freq):
"""Calculate \Delta_M^2 following Eq. (12)
Parameters
----------
k: wavenumbers of interests
freq: frequency in GHz
"""
v0 = freq * u.GHz
return k**3*self.P_k(k)*(3/((16*np.pi**2*u.e)*v0**2))**2
def P_k(self, k):
"""find the primordial magnetic field power spectrum based on an
amplitude and spectral index
Parameters
----------
k (np.ndarray): wavenumbers of interest
"""
kD = self.kDissip()
Pk = np.zeros_like(k, dtype=np.double)
Pk[k<kD] = self.A*k[k<kD]**self.n_B
return Pk
def kDissip(self):
"""
Returns
-------
kD: in unit of Mpc^-1
"""
k_lambda = 2*np.pi / self.lam
# version from Planck 2015
# kD = 5.5e4 * self.B_lambda**(-2) * k_lambda**(self.n_B+3) * self.h
# version from Kosowsky 2005
kD = 2.9e4 * self.B_lambda**(-2) * k_lambda**(self.n_B+3) * self.h
kD = kD**(1/(self.n_B+5))
return kD
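# A minimal usage sketch (illustrative only, not part of the original module); the
# field parameters below are placeholder assumptions, e.g. a nearly scale-invariant
# spectrum with a 1 Mpc smoothing scale:
#
#   mag = MagneticField(B_lambda=1.0, n_B=-2.9, lam=1, h=0.673)
#   k = np.logspace(-4, 2, 512)   # wavenumbers in Mpc^-1
#   Pk = mag.P_k(k)               # vanishes above the dissipation scale mag.kDissip()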
class ClaaTransferFunction:
def __init__(self, cosmo=None, mag=None, verbose=True):
"""Calculate the transfer function for Cl_\alpha\alpha from given
primordial magnetic field power spectrum following arxiv 1106.1438.
Parameters
----------
cosmo: CAMBdata object containing the background cosmology
mag: MagneticField object
verbose: option to be verbose
"""
self.cosmo = cosmo
self.mag = mag
self.verbose = verbose
def T(self, lmax, k, n_eta=1000, max_mem=100):
r"""In camb, opacity -> a n_e \sigma_T
T_L(k) = \int d\eta a n_e \sigma_T j_L(k(\eta_0-\eta)) -- Eq. (30)
Parameters
----------
lmax: maximum ell to compute up to
k: values of wavenumber to evaluate at
n_eta: number of eta values to evaluate and use as a spline basis
max_mem: maximum memory allowed to use, in GB
Returns
-------
(T[n_ell, n_k], T1[n_ell, n_k]) with n_ell = lmax+1
"""
# calculate the amount of memory roughly and split ells into
# multiple parts so we don't exceed the maximum memory specified
mem = (lmax+2)*n_eta*len(k)*8/1024**3 * 1.2
nparts = int(np.ceil(mem / max_mem))
# convention is everything with _ is a flat version
ells_ = np.arange(lmax+2)
if nparts > 1: ells_parts = np.array_split(ells_, nparts)
else: ells_parts = [ells_]
if self.verbose: print(f"-> Split ells into: {nparts:d} parts")
# get eta ranges to evaluate based on given cosmology
eta_0 = self.cosmo.tau0 # comoving time today
eta_star = self.cosmo.tau_maxvis # comoving time at maximum of visibility
etas_ = np.linspace(eta_star, eta_0, n_eta) # flat array
if self.verbose: print(f"-> Eta integrated from {eta_0:.1f} to {eta_star:.1f}")
k_ = k # alias to be consistent
Tlks_list = [] # to store Tlk for each part
ells_list = []
# define a combined operation: spline the integrand over eta, then integrate it with quad
spline_int = lambda x: quad(CubicSpline(etas_, x), eta_star, eta_0, epsrel=1e-4)[0]
for i in tqdm(range(len(ells_parts))):
ells_ = ells_parts[i]
# allocate each array to a separate axis so broadcasting works properly
ells, k, etas = np.ix_(ells_, k_, etas_)
# don't need to do this every time but we don't expect to run lots of parts
dtau = self.cosmo.get_background_time_evolution(etas, vars=['opacity'], format='array')
jlk = spherical_jn(ells, k*(eta_0-etas))
integrand = dtau * jlk
del dtau, jlk
# apply spline integration to the right axis
Tlk = np.apply_along_axis(spline_int, -1, integrand)
del integrand
# store results for each part
Tlks_list.append(Tlk)
ells_list.append(ells)
# stack Tlk from different parts
if self.verbose: print(f"-> Stacking {nparts:d} parts")
Tlk = np.vstack(Tlks_list)
ells = np.vstack(ells_list)
del Tlks_list, ells_list
# calculate Tl1k
ells = ells[1:-1].reshape(-1,1) # l=1, lmax
# return up to lmax
return ells, Tlk[:-1,...]
def claa(self, lmin, lmax, kmin, kmax, lam0, nk=1000, n_eta=1000, max_mem=100):
"""Calculate C_l^{\alpha\alpha} following Eq. (31)
Parameters
----------
lmin, lmax: range of ells of interests
kmin, kmax: range of wavenumber k to integrate
lam0: observing wavelength
nk: number of k to compute to use as a basis for interpolation
"""
# get transfer functions
assert lmin>0, "Only support lmin>0 at the moment"
logk_ = np.linspace(np.log(kmin), np.log(kmax), nk)
# if self.Tlk is None or self.T1lk is None:
Tlk, T1lk = self.T(lmax+1, np.exp(logk_), n_eta, max_mem)
ells_ = np.arange(lmin, lmax+1, dtype=int)
Tm1 = Tlk[ells_-1, :]
Tp1 = Tlk[ells_+1, :]
T1 = T1lk[ells_, :]
print(np.sum(Tlk, axis=1))
del Tlk, T1lk
# make sure ells are aligned with the axis it's supposed to broadcast to
ells, logk = np.ix_(ells_, logk_)
integrand = (ells/(2*ells+1)*Tm1**2 + (ells+1)/(2*ells+1)*Tp1**2 - T1**2)
integrand *= self.mag.delta_m2(np.exp(logk), lam0)
del Tm1, Tp1, T1
# make a spline interpolator to be used for integration from logk to integrand
spline_int = lambda x: romberg(CubicSpline(logk_, x), np.log(kmin), np.log(kmax))
claa = 2/np.pi*np.apply_along_axis(spline_int, -1, integrand)
del integrand
return ells_, claa.ravel()
def jn_first_zero(n):
"""Get an approximated location for the first zero of
spherical bessel's function at a given order n"""
precomputed = [3.14159, 4.49341, 5.76346, 6.98793, 8.18256, 9.35581, 10.5128,
11.657, 12.7908, 13.9158, 15.0335, 16.1447, 17.2505, 18.3513,
19.4477, 20.5402, 21.6292, 22.715, 23.7978, 24.878, 25.9557,
27.0311, 28.1043, 29.1756, 30.245, 31.3127]
try: return precomputed[n]
except (IndexError, TypeError):
# formula 9.5.14 in Handbook of Mathematical Functions
v = n + 0.5
return v + 1.8557571*v**(1/3) + 1.033150*v**(-1/3) - \
0.00397*v**(-1) - 0.0908*v**(-5/3) + 0.043*v**(-7/3)
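# Illustrative consistency check (comment only): for n = 5 the tabulated value above is
# 9.35581, while the asymptotic formula with v = n + 0.5 = 5.5 gives roughly 9.3559, so
# the table and the formula agree to ~1e-4 well before the table runs out at n = 25.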
def jn_second_zero(n):
"""Get an approximate location for the second zero of the
spherical Bessel function at a given order n"""
# formula 9.5.14 in Handbook of Mathematical Functions
v = n + 0.5
return v + 3.2446076*v**(1/3) + 3.1582436*v**(-1/3) - \
0.083307*v**(-1) - 0.84367*v**(-5/3) + 0.8639*v**(-7/3)
class KosowskyClaa:
def __init__(self, lmax, cosmo, mag):
"""Calculate Cl^aa using approximation formula in Kosowsky (2005)
Parameters
----------
lmax: maximum ell to calculate
cosmo: camb cosmology class
mag: magnetic field class
"""
self.lmax = lmax
self.cosmo = cosmo
self.mag = mag
def claa(self, nx=1000, freq=100, spl=CubicSpline, dtype=np.double):
"""
Parameters
----------
nx: number of x sample points used to build the spline of the integrand
freq: frequency of interest in GHz
"""
v_0 = freq * u.GHz
h = self.cosmo.hubble_parameter(0)/100 # hubble h today
eta_0 = self.cosmo.tau0 * h # comoving time today
# kD = self.mag.kDissip()
kD = 2 # following Kosowsky 2005
xd = kD*eta_0
ells = np.arange(0, self.lmax+1, dtype=dtype)
clas = np.zeros_like(ells, dtype=dtype)
# perform the exact calculation up to x = x_approx; beyond that use the large-x
# behaviour j_l(x)^2 ~ 1/(2x^2) (on average), i.e. \int f(x) j_l^2 -> \int 1/2 f(x) 1/x^2,
# whose antiderivative is the `remainder` function defined below
if self.mag.n_B == 1: remainder = lambda y: 0.5*np.log(y)
else: remainder = lambda y: 0.5*y**(self.mag.n_B-1) / (self.mag.n_B-1)
for i in tqdm(range(len(ells))):
l = ells[i]
# x_approx = xd # no approximation
x_approx = min(jn_second_zero(l),xd) # approximation
x = np.linspace(0, x_approx, nx, dtype=dtype)[1:]
integrand = x**self.mag.n_B * jl(l, x)**2
# clas[i] = integrate.chebyshev(spl(x, integrand), 0, x_approx, epsrel=1e-12, epsabs=1e-16) + remainder(xd) - remainder(x_approx)
clas[i] = integrate.romberg(spl(x, integrand), 0, x_approx, epsrel=1e-12, epsabs=1e-16) + remainder(xd) - remainder(x_approx)
# try the actual function instead of spline
# f = lambda x_: x_**self.mag.n_B * jl(l, x_)**2
# clas[i] = integrate.chebyshev(f, 1e-4, x_approx)
# reuse some numbers in mag.A
# alpha=e^2 convention
clas *= 9*ells*(ells+1)/(4*(2*np.pi)**5*u.e**2) * self.mag.A / eta_0**(self.mag.n_B+3) / (v_0**4)
# 4 pi alpha= e^2 convention
# clas *= 9*ells*(ells+1)/(8*(np.pi)**3*u.e**2) * self.mag.A / eta_0**(self.mag.n_B+3) / (v_0**4)
return ells, clas
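# Illustrative usage sketch only; `cosmo` stands for a CAMB results object exposing
# hubble_parameter() and tau0 (the attributes used above), and the field parameters
# are placeholder assumptions rather than values from this module:
#
#   mag = MagneticField(B_lambda=1.0, n_B=-2.9)
#   kos = KosowskyClaa(lmax=1000, cosmo=cosmo, mag=mag)
#   ells, clas = kos.claa(nx=1000, freq=100)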
def clbb_from_claa(lmax_b, clee, claa):
"""Calculate ClBB from Faraday's rotation based on an input rotational
power spectrum. It is assumed that the power spectra input starts from ell=0
Parameters
----------
lmax_b: lmax for ClBB
clee: ClEE power spectrum
claa: Cl^\alpha\alpha power spectrum (rotational power spectrum)
Returns
-------
clbb
"""
lmax_e = len(clee) - 1
lmax_a = len(claa) - 1
# not sure about these asserts
assert(lmax_e >= 2)
assert(lmax_a >= 2)
assert(lmax_b <= (lmax_e+lmax_a-1))
gl = gauss_legendre_quadrature(int((lmax_e + lmax_a + lmax_b)*0.5) + 1)
ls = np.arange(0, lmax_a+1, dtype=np.double)
zeta_00 = gl.cf_from_cl(0, 0, (2*ls+1)*claa)
ls = np.arange(0, lmax_e+1, dtype=np.double)
zeta_m2m2 = gl.cf_from_cl(-2, -2, (2*ls+1)*clee)
zeta_p2m2 = gl.cf_from_cl( 2, -2, (2*ls+1)*clee)
clbb = (gl.cl_from_cf(lmax_b, 2, 2, zeta_00 * zeta_m2m2) + \
gl.cl_from_cf(lmax_b, -2, 2, zeta_00 * zeta_p2m2)) / (4*np.pi)
return clbb
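# Illustrative usage sketch only; the spectra below are placeholder arrays (both are
# assumed to start at ell = 0, as required above), not outputs of this module:
#
#   clee = np.zeros(2001)   # an EE spectrum up to lmax_e = 2000
#   claa = np.zeros(1001)   # a rotation spectrum up to lmax_a = 1000
#   clbb = clbb_from_claa(lmax_b=2000, clee=clee, claa=claa)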
| 38.859477
| 141
| 0.583298
|
f8532cc164efb6861c2e040921ef6974c1559ec9
| 1,923
|
py
|
Python
|
backend/typetree/test_typetree.py
|
catenax-ng/product-data-integrity-demonstrator
|
6f6f6bb0ac6f42ec4dbdb79e607226a548703cb2
|
[
"Apache-2.0"
] | null | null | null |
backend/typetree/test_typetree.py
|
catenax-ng/product-data-integrity-demonstrator
|
6f6f6bb0ac6f42ec4dbdb79e607226a548703cb2
|
[
"Apache-2.0"
] | null | null | null |
backend/typetree/test_typetree.py
|
catenax-ng/product-data-integrity-demonstrator
|
6f6f6bb0ac6f42ec4dbdb79e607226a548703cb2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/catenax-ng/product-data-integrity-demonstrator
#
# SPDX-License-Identifier: Apache-2.0
import uuid
from urllib.parse import urlparse
from fastapi.testclient import TestClient
from typetree.models import ItemType
from cx.main import app
from typetree.typetree_private import *
client = TestClient(app)
def test_create_and_update_item_type():
type_name = str(uuid.uuid4())
# insert
r = client.post("/ItemType", json={
"type_details": {
"type_name": type_name,
"version": "1.0"
}
})
assert r.status_code == 200
j = r.json()
assert 'node' in j
assert 'id' in j['node']
assert 'type_details' in j
assert j['type_details']['type_name'] == type_name
id1 = j['node']['id']
# update
r = client.post("/ItemType", json={
"type_details": {
"type_name": type_name,
"version": "2.0"
}
})
assert r.status_code == 200
j = r.json()
id2 = j['node']['id']
assert id1 != id2
# fetch from id2 and check if 'previous' links to id1
assert 'previous' in j['node']['node']
assert j['node']['node']['previous'] == id1
# update 2
# update on a given 'previous' id, in this case based on the 1st insert
# basically overruling the internal database head for the type
# this creates a separate branch version 1.0 -> 3.0
r = client.post("/ItemType", json={
"previous": id1,
"type_details": {
"type_name": type_name,
"version": "3.0"
}
})
assert r.status_code == 200
j = r.json()
id3 = j['node']['id']
assert id3 != id1
assert id3 != id2
# 'previous' link should point to id1 in this case
assert j['node']['node']['previous'] == id1
| 24.341772
| 73
| 0.601144
|
c6042d489482760fb53ce9c48c13ae1eabdb0ff7
| 3,824
|
py
|
Python
|
ucscsdk/mometa/callhome/CallhomeProfile.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 9
|
2016-12-22T08:39:25.000Z
|
2019-09-10T15:36:19.000Z
|
ucscsdk/mometa/callhome/CallhomeProfile.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 10
|
2017-01-31T06:59:56.000Z
|
2021-11-09T09:14:37.000Z
|
ucscsdk/mometa/callhome/CallhomeProfile.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 13
|
2016-11-14T07:42:58.000Z
|
2022-02-10T17:32:05.000Z
|
"""This module contains the general information for CallhomeProfile ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class CallhomeProfileConsts():
FORMAT_FULL_TXT = "fullTxt"
FORMAT_SHORT_TXT = "shortTxt"
FORMAT_XML = "xml"
LEVEL_CRITICAL = "critical"
LEVEL_DEBUG = "debug"
LEVEL_DISASTER = "disaster"
LEVEL_FATAL = "fatal"
LEVEL_MAJOR = "major"
LEVEL_MINOR = "minor"
LEVEL_NORMAL = "normal"
LEVEL_NOTIFICATION = "notification"
LEVEL_WARNING = "warning"
class CallhomeProfile(ManagedObject):
"""This is CallhomeProfile class."""
consts = CallhomeProfileConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("CallhomeProfile", "callhomeProfile", "profile-[name]", VersionMeta.Version101a, "InputOutput", 0x3ff, [], ["admin", "operations"], [u'callhomeEp'], [u'callhomeDest'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"alert_groups": MoPropertyMeta("alert_groups", "alertGroups", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x2, None, None, r"""((diagnostic|all|syslogPort|inventory|system|license|environmental|test|linecard|lifeCycle|ciscoTac|supervisor),){0,11}(diagnostic|all|syslogPort|inventory|system|license|environmental|test|linecard|lifeCycle|ciscoTac|supervisor){0,1}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"format": MoPropertyMeta("format", "format", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["fullTxt", "shortTxt", "xml"], []),
"level": MoPropertyMeta("level", "level", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["critical", "debug", "disaster", "fatal", "major", "minor", "normal", "notification", "warning"], []),
"max_size": MoPropertyMeta("max_size", "maxSize", "uint", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-5000000"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101a, MoPropertyMeta.NAMING, 0x80, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x100, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"alertGroups": "alert_groups",
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"format": "format",
"level": "level",
"maxSize": "max_size",
"name": "name",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.alert_groups = None
self.child_action = None
self.descr = None
self.format = None
self.level = None
self.max_size = None
self.status = None
ManagedObject.__init__(self, "CallhomeProfile", parent_mo_or_dn, **kwargs)
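# Illustrative construction only; the parent DN below is a hypothetical example (the
# meta above only states that the parent class is callhomeEp):
#
#   profile = CallhomeProfile(parent_mo_or_dn="call-home", name="default",
#                             level="critical", format="xml")
#   # the rn is then derived from the naming property as "profile-default"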
| 54.628571
| 401
| 0.647228
|