hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace3c7815f36ff486526de46ee44a55229ebdc1a | 18,601 | py | Python | sdk/python/pulumi_aws/ec2/placement_group.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-10T16:33:40.000Z | 2021-11-10T16:33:40.000Z | sdk/python/pulumi_aws/ec2/placement_group.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/placement_group.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = ['PlacementGroupArgs', 'PlacementGroup']
@pulumi.input_type
class PlacementGroupArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); keep changes to
    # comments/docstrings only so regenerated output stays diffable.
    def __init__(__self__, *,
                 strategy: pulumi.Input[Union[str, 'PlacementStrategy']],
                 name: Optional[pulumi.Input[str]] = None,
                 partition_count: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a PlacementGroup resource.
        :param pulumi.Input[Union[str, 'PlacementStrategy']] strategy: The placement strategy. Can be `"cluster"`, `"partition"` or `"spread"`.
        :param pulumi.Input[str] name: The name of the placement group.
        :param pulumi.Input[int] partition_count: The number of partitions to create in the
               placement group. Can only be specified when the `strategy` is set to
               `"partition"`. Valid values are 1 - 7 (default is `2`).
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        # `strategy` is the only required input; optional inputs are stored
        # only when supplied so that unset values remain absent.
        pulumi.set(__self__, "strategy", strategy)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if partition_count is not None:
            pulumi.set(__self__, "partition_count", partition_count)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def strategy(self) -> pulumi.Input[Union[str, 'PlacementStrategy']]:
        """
        The placement strategy. Can be `"cluster"`, `"partition"` or `"spread"`.
        """
        return pulumi.get(self, "strategy")

    @strategy.setter
    def strategy(self, value: pulumi.Input[Union[str, 'PlacementStrategy']]):
        pulumi.set(self, "strategy", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the placement group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="partitionCount")
    def partition_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of partitions to create in the
        placement group. Can only be specified when the `strategy` is set to
        `"partition"`. Valid values are 1 - 7 (default is `2`).
        """
        return pulumi.get(self, "partition_count")

    @partition_count.setter
    def partition_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "partition_count", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _PlacementGroupState:
    # NOTE: generated by tfgen. Unlike PlacementGroupArgs, every field here is
    # optional because state lookups may match on any subset of properties.
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 partition_count: Optional[pulumi.Input[int]] = None,
                 placement_group_id: Optional[pulumi.Input[str]] = None,
                 strategy: Optional[pulumi.Input[Union[str, 'PlacementStrategy']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering PlacementGroup resources.
        :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the placement group.
        :param pulumi.Input[str] name: The name of the placement group.
        :param pulumi.Input[int] partition_count: The number of partitions to create in the
               placement group. Can only be specified when the `strategy` is set to
               `"partition"`. Valid values are 1 - 7 (default is `2`).
        :param pulumi.Input[str] placement_group_id: The ID of the placement group.
        :param pulumi.Input[Union[str, 'PlacementStrategy']] strategy: The placement strategy. Can be `"cluster"`, `"partition"` or `"spread"`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        """
        # Store only the properties that were actually supplied.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if partition_count is not None:
            pulumi.set(__self__, "partition_count", partition_count)
        if placement_group_id is not None:
            pulumi.set(__self__, "placement_group_id", placement_group_id)
        if strategy is not None:
            pulumi.set(__self__, "strategy", strategy)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the placement group.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the placement group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="partitionCount")
    def partition_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of partitions to create in the
        placement group. Can only be specified when the `strategy` is set to
        `"partition"`. Valid values are 1 - 7 (default is `2`).
        """
        return pulumi.get(self, "partition_count")

    @partition_count.setter
    def partition_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "partition_count", value)

    @property
    @pulumi.getter(name="placementGroupId")
    def placement_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the placement group.
        """
        return pulumi.get(self, "placement_group_id")

    @placement_group_id.setter
    def placement_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "placement_group_id", value)

    @property
    @pulumi.getter
    def strategy(self) -> Optional[pulumi.Input[Union[str, 'PlacementStrategy']]]:
        """
        The placement strategy. Can be `"cluster"`, `"partition"` or `"spread"`.
        """
        return pulumi.get(self, "strategy")

    @strategy.setter
    def strategy(self, value: Optional[pulumi.Input[Union[str, 'PlacementStrategy']]]):
        pulumi.set(self, "strategy", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
class PlacementGroup(pulumi.CustomResource):
    # NOTE: generated by tfgen. The two @overload __init__ signatures exist
    # only for type checkers; the real dispatch happens in the un-decorated
    # __init__ below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 partition_count: Optional[pulumi.Input[int]] = None,
                 strategy: Optional[pulumi.Input[Union[str, 'PlacementStrategy']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides an EC2 placement group. Read more about placement groups
        in [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html).

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        web = aws.ec2.PlacementGroup("web", strategy="cluster")
        ```

        ## Import

        Placement groups can be imported using the `name`, e.g.,

        ```sh
         $ pulumi import aws:ec2/placementGroup:PlacementGroup prod_pg production-placement-group
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: The name of the placement group.
        :param pulumi.Input[int] partition_count: The number of partitions to create in the
               placement group. Can only be specified when the `strategy` is set to
               `"partition"`. Valid values are 1 - 7 (default is `2`).
        :param pulumi.Input[Union[str, 'PlacementStrategy']] strategy: The placement strategy. Can be `"cluster"`, `"partition"` or `"spread"`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PlacementGroupArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an EC2 placement group. Read more about placement groups
        in [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html).

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        web = aws.ec2.PlacementGroup("web", strategy="cluster")
        ```

        ## Import

        Placement groups can be imported using the `name`, e.g.,

        ```sh
         $ pulumi import aws:ec2/placementGroup:PlacementGroup prod_pg production-placement-group
        ```

        :param str resource_name: The name of the resource.
        :param PlacementGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide whether the caller used the (resource_name, args, opts) form
        # or the keyword-argument form, then forward to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(PlacementGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       partition_count: Optional[pulumi.Input[int]] = None,
                       strategy: Optional[pulumi.Input[Union[str, 'PlacementStrategy']]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        # Shared implementation behind both public __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No existing resource id: this is a create, so build the full
            # input bag and validate required properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PlacementGroupArgs.__new__(PlacementGroupArgs)

            __props__.__dict__["name"] = name
            __props__.__dict__["partition_count"] = partition_count
            # `strategy` is required unless we are rehydrating from a URN.
            if strategy is None and not opts.urn:
                raise TypeError("Missing required property 'strategy'")
            __props__.__dict__["strategy"] = strategy
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["arn"] = None
            __props__.__dict__["placement_group_id"] = None
            __props__.__dict__["tags_all"] = None
        super(PlacementGroup, __self__).__init__(
            'aws:ec2/placementGroup:PlacementGroup',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            partition_count: Optional[pulumi.Input[int]] = None,
            placement_group_id: Optional[pulumi.Input[str]] = None,
            strategy: Optional[pulumi.Input[Union[str, 'PlacementStrategy']]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'PlacementGroup':
        """
        Get an existing PlacementGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the placement group.
        :param pulumi.Input[str] name: The name of the placement group.
        :param pulumi.Input[int] partition_count: The number of partitions to create in the
               placement group. Can only be specified when the `strategy` is set to
               `"partition"`. Valid values are 1 - 7 (default is `2`).
        :param pulumi.Input[str] placement_group_id: The ID of the placement group.
        :param pulumi.Input[Union[str, 'PlacementStrategy']] strategy: The placement strategy. Can be `"cluster"`, `"partition"` or `"spread"`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _PlacementGroupState.__new__(_PlacementGroupState)

        __props__.__dict__["arn"] = arn
        __props__.__dict__["name"] = name
        __props__.__dict__["partition_count"] = partition_count
        __props__.__dict__["placement_group_id"] = placement_group_id
        __props__.__dict__["strategy"] = strategy
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        return PlacementGroup(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        Amazon Resource Name (ARN) of the placement group.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the placement group.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="partitionCount")
    def partition_count(self) -> pulumi.Output[int]:
        """
        The number of partitions to create in the
        placement group. Can only be specified when the `strategy` is set to
        `"partition"`. Valid values are 1 - 7 (default is `2`).
        """
        return pulumi.get(self, "partition_count")

    @property
    @pulumi.getter(name="placementGroupId")
    def placement_group_id(self) -> pulumi.Output[str]:
        """
        The ID of the placement group.
        """
        return pulumi.get(self, "placement_group_id")

    @property
    @pulumi.getter
    def strategy(self) -> pulumi.Output[str]:
        """
        The placement strategy. Can be `"cluster"`, `"partition"` or `"spread"`.
        """
        return pulumi.get(self, "strategy")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")
| 43.767059 | 249 | 0.637278 |
ace3c916c11e834008381e697fa268696684017c | 1,631 | py | Python | examples/dfp/v201602/product_service/get_all_products.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/dfp/v201602/product_service/get_all_products.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201602/product_service/get_all_products.py | wbrp/googleads-python-lib | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | [
"Apache-2.0"
] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all products.
Products are created automatically from product templates.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Fetch and print every product, paging through the full result set.

  Args:
    client: an initialized dfp.DfpClient.
  """
  # Initialize appropriate service.
  product_service = client.GetService('ProductService', version='v201602')

  # Create a filter statement; ordering by id gives deterministic paging.
  statement = dfp.FilterStatement('ORDER BY id ASC')

  # Get products by statement, one page at a time, until a page is empty.
  while True:
    response = product_service.getProductsByStatement(statement.ToStatement())
    if 'results' in response:
      # Display results.
      for product in response['results']:
        print ('Product with id \'%s\' and name \'%s\' was found.' % (
            product['id'], product['name']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  # Parenthesized so this parses under both Python 2 and Python 3; the bare
  # print statement used previously is a SyntaxError on Python 3 and was
  # inconsistent with the parenthesized call above.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object from the stored googleads credentials file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| 30.203704 | 78 | 0.721643 |
ace3cbedc7d4881225c1195ac575300acae606c7 | 1,299 | py | Python | .history/my_classes/ScopesClosuresAndDecorators/nonLocalScopes_20210710215608.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/ScopesClosuresAndDecorators/nonLocalScopes_20210710215608.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/ScopesClosuresAndDecorators/nonLocalScopes_20210710215608.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ NonLocal Scopes
Inner Functions
We can define functions from inside another function:
"""
def outer_func():
    """Demonstrate a nested ("inner") function definition.

    Returns None; exists only to show that inner_func is created and
    called each time outer_func runs.
    """
    # this is an example of a nested function
    def inner_func():
        # some code
        # `pass` is required: a body consisting only of comments is a
        # SyntaxError, which is what the previous version produced.
        pass
    inner_func()


outer_func()
"""
Both functions have access to the global and built-in scopes as well as their respective local scopes
But the inner function also has access to its enclosing scope - the scope of the outer function
That scope is neither local (to inner_func) nor global - it is called a non local scope
Referencing variables from the enclosing scope
Consider this example
module1.py
a = 10
def outer_func():
print(a)
outer_func()

When we call outer_func, Python sees the reference to a; it does not find a
in the local scope, so it looks in the enclosing (here, global) scope and
finds it there.
Consider this example
module1.py
def outer_func():
a = 10
def inner_func():
print(a)
inner_func()
outer_func()
When we call outer_func, inner_func is created and called
When inner_func is called, Python does not find a in the local(inner_func) scope
So it looks for it in the enclosing scope, in this case the scope of the outer func
Since it does not find it there either, it continues outward to the global
scope, where it finds it
Another example
module1.py
a = 10
def outer_func():
def inner_func():
print(a)
inner_func()
outer_func()
"""
| 17.554054 | 101 | 0.69746 |
ace3cc24a29029f497ead811c8edeee880ff00ad | 340 | py | Python | rf_openmm/__init__.py | dominicrufa/rf_openmm | 4376a25218529f3ecae09760f01557233885dfd3 | [
"MIT"
] | null | null | null | rf_openmm/__init__.py | dominicrufa/rf_openmm | 4376a25218529f3ecae09760f01557233885dfd3 | [
"MIT"
] | null | null | null | rf_openmm/__init__.py | dominicrufa/rf_openmm | 4376a25218529f3ecae09760f01557233885dfd3 | [
"MIT"
] | null | null | null | """
rf_openmm
a library and implementation sandbox for the reaction field nonbonded method in OpenMM
"""
# Re-export the package's public API at the top level.
from .rf_openmm import *
# Handle versioneer: resolve the package version once at import time.
from ._version import get_versions
versions = get_versions()
__version__ = versions['version']
__git_revision__ = versions['full-revisionid']
# Remove the helper names so they are not part of the package namespace.
del get_versions, versions
| 22.666667 | 86 | 0.791176 |
ace3cddc19fca59c12ec51150eec7e2c05300a9f | 2,004 | py | Python | src/the_tale/the_tale/linguistics/workers/linguistics_manager.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 85 | 2017-11-21T12:22:02.000Z | 2022-03-27T23:07:17.000Z | src/the_tale/the_tale/linguistics/workers/linguistics_manager.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | 545 | 2017-11-04T14:15:04.000Z | 2022-03-27T14:19:27.000Z | src/the_tale/the_tale/linguistics/workers/linguistics_manager.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | 45 | 2017-11-11T12:36:30.000Z | 2022-02-25T06:10:44.000Z |
import smart_imports
smart_imports.all()
class Worker(utils_workers.BaseWorker):
    """Schedules delayed linguistics maintenance in response to game events.

    Dictionary/lexicon change commands only *schedule* work; the actual
    updates run later from process_no_cmd, once the configured delay has
    elapsed, so bursts of changes collapse into a single update.
    """

    GET_CMD_TIMEOUT = 10

    def clean_queues(self):
        """Purge all queues, including this worker's dedicated stop queue."""
        super(Worker, self).clean_queues()
        self.stop_queue.queue.purge()

    def initialize(self):
        """Mark the worker ready; nothing is scheduled until an event arrives."""
        self.initialized = True
        self._next_words_update_at = None
        self._next_templates_update_at = None
        self.logger.info('LINGUISTICS MANAGER INITIALIZED')

    def process_no_cmd(self):
        """Run whichever scheduled updates have reached their deadline."""
        templates_deadline = self._next_templates_update_at
        if templates_deadline is not None and datetime.datetime.now() > templates_deadline:
            self.logger.info('update templates errors status')
            logic.update_templates_errors()
            self._next_templates_update_at = None

        words_deadline = self._next_words_update_at
        if words_deadline is not None and datetime.datetime.now() > words_deadline:
            self.logger.info('update words_usage_info')
            logic.update_words_usage_info()
            self._next_words_update_at = None

    def cmd_game_dictionary_changed(self):
        return self.send_cmd('game_dictionary_changed')

    def process_game_dictionary_changed(self):
        # A dictionary change requires both a template-errors re-check and a
        # word-usage refresh; schedule each unless one is already pending.
        if self._next_templates_update_at is None:
            self._next_templates_update_at = datetime.datetime.now() + conf.settings.LINGUISTICS_MANAGER_UPDATE_DELAY
        if self._next_words_update_at is None:
            self._next_words_update_at = datetime.datetime.now() + conf.settings.LINGUISTICS_MANAGER_UPDATE_DELAY

    def cmd_game_lexicon_changed(self):
        return self.send_cmd('game_lexicon_changed')

    def process_game_lexicon_changed(self):
        # A lexicon change only refreshes word usage info; template error
        # statuses are recalculated in the template save method instead.
        if self._next_words_update_at is None:
            self._next_words_update_at = datetime.datetime.now() + conf.settings.LINGUISTICS_MANAGER_UPDATE_DELAY
ace3ce0f75034cde72969cc2b4c00d0d8492a56d | 4,579 | py | Python | src/setup.py | rostob/Limnoria | 068488c546612ee0198cecf1a4a46e2667551bcf | [
"BSD-3-Clause"
] | 22 | 2021-09-01T20:51:10.000Z | 2022-03-23T05:51:58.000Z | src/setup.py | rostob/Limnoria | 068488c546612ee0198cecf1a4a46e2667551bcf | [
"BSD-3-Clause"
] | 16 | 2021-09-02T08:33:29.000Z | 2022-03-28T18:21:09.000Z | src/setup.py | rostob/Limnoria | 068488c546612ee0198cecf1a4a46e2667551bcf | [
"BSD-3-Clause"
] | 9 | 2021-09-02T09:07:53.000Z | 2022-03-28T17:34:59.000Z | ###
# Copyright (c) 2020-2021, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
try:
import setuptools
except ImportError:
setuptools = None
from . import authors
if setuptools:
    def plugin_setup(plugin, **kwargs):
        """Wrapper of setuptools.setup that auto-fills some fields for
        Limnoria plugins.

        *plugin* is either an imported plugin module or its name as a string;
        metadata (author, version, URL, readme, entry points) is derived from
        the module's attributes, and any explicit keyword arguments the caller
        passes take precedence over the derived defaults.
        """
        if isinstance(plugin, str):
            if plugin in sys.modules:
                plugin = sys.modules[plugin]
            else:
                # Assume setup.py lives inside the plugin directory, so the
                # plugin package is importable from its parent directory.
                setup_path = sys.modules['__main__'].__file__
                sys.path.insert(0, os.path.join(os.path.dirname(setup_path), '..'))
                plugin = __import__(plugin)
        author = plugin.__author__
        version = plugin.__version__
        url = plugin.__url__
        maintainer = getattr(plugin, '__maintainer__', authors.unknown)
        # Always ship gettext catalogs with the package.
        kwargs.setdefault('package_data', {}).setdefault('', []).append('*.po')
        capitalized_name = plugin.Class.__name__
        kwargs.setdefault(
            'name', 'limnoria-%s' % capitalized_name.lower())
        if version:
            kwargs.setdefault('version', version)
        if url:
            kwargs.setdefault('url', url)
        if 'long_description' not in kwargs:
            # Prefer reStructuredText, then Markdown, for the PyPI page.
            readme_files = [
                ('text/x-rst', 'README.rst'),
                ('text/markdown', 'README.md'),
            ]
            for (mimetype, filename) in readme_files:
                readme_path = os.path.join(
                    os.path.dirname(plugin.__file__), filename)
                if os.path.isfile(readme_path):
                    with open(readme_path, 'r') as fd:
                        kwargs['long_description'] = fd.read()
                    kwargs['long_description_content_type'] = mimetype
                    break
        # PyPI-style dashes become underscores in the importable module name.
        module_name = kwargs['name'].replace('-', '_')
        kwargs.setdefault('packages', [module_name])
        kwargs.setdefault('package_dir', {module_name: '.'})
        # Register the plugin so Limnoria can discover it via entry points.
        kwargs.setdefault('entry_points', {
            'limnoria.plugins': '%s = %s' % (capitalized_name, module_name)})
        kwargs.setdefault('install_requires', []).append('limnoria')
        kwargs.setdefault('classifiers', []).extend([
            'Environment :: Plugins',
            'Programming Language :: Python :: 3',
            'Topic :: Communications :: Chat',
        ])
        if author is not authors.unknown:
            if author.name or author.nick:
                kwargs.setdefault('author', author.name or author.nick)
            if author.email:
                kwargs.setdefault('author_email', author.email)
        if maintainer is not authors.unknown:
            if maintainer.name or maintainer.nick:
                kwargs.setdefault(
                    'maintainer', maintainer.name or maintainer.nick)
            if maintainer.email:
                kwargs.setdefault('maintainer_email', maintainer.email)
        setuptools.setup(
            **kwargs)
else:
    # Without setuptools we cannot install anything; fail loudly when called.
    def plugin_setup(plugin, **kwargs):
        raise ImportError('setuptools')
| 40.166667 | 83 | 0.637257 |
ace3cef622dac179c4b44c63104154e13f2cc367 | 23,005 | py | Python | app/questionnaire/questionnaire_schema.py | ONSdigital/eq-questionnaire-runner | cac38e81714b03e3e85c56f9098adc01e7ccc703 | [
"MIT"
] | 3 | 2020-09-28T13:21:21.000Z | 2021-05-05T14:14:51.000Z | app/questionnaire/questionnaire_schema.py | ONSdigital/eq-questionnaire-runner | cac38e81714b03e3e85c56f9098adc01e7ccc703 | [
"MIT"
] | 402 | 2019-11-06T17:23:03.000Z | 2022-03-31T16:03:35.000Z | app/questionnaire/questionnaire_schema.py | ONSdigital/eq-questionnaire-runner | cac38e81714b03e3e85c56f9098adc01e7ccc703 | [
"MIT"
] | 10 | 2020-03-03T14:23:27.000Z | 2022-01-31T12:21:21.000Z | from collections import abc, defaultdict
from copy import deepcopy
from functools import cached_property
from typing import Any, Generator, Iterable, Mapping, Optional, Union
from flask_babel import force_locale
from werkzeug.datastructures import ImmutableDict
from app.data_models.answer import Answer
from app.forms import error_messages
from app.questionnaire.schema_utils import get_values_for_key
# Language used when the caller does not specify one.
DEFAULT_LANGUAGE_CODE = "en"

# Question types that only occur nested inside a list collector block.
LIST_COLLECTOR_CHILDREN = [
    "ListAddQuestion",
    "ListEditQuestion",
    "ListRemoveQuestion",
    "PrimaryPersonListAddOrEditQuestion",
]

# Question types that only occur nested inside a relationship collector block.
RELATIONSHIP_CHILDREN = ["UnrelatedQuestion"]

# Type alias: a question definition as it appears in the schema JSON.
QuestionSchema = Mapping[str, Any]
class QuestionnaireSchema:  # pylint: disable=too-many-public-methods
    def __init__(
        self, questionnaire_json: Mapping, language_code: str = DEFAULT_LANGUAGE_CODE
    ):
        """Build id-based lookup tables over the questionnaire JSON.

        NOTE: the _get_*_by_id calls are order-dependent: each level
        (sections -> groups -> blocks -> questions -> answers) reads the
        previous level's table and appends to the shared _parent_id_map.
        """
        self._parent_id_map: dict[str, str] = {}
        self._list_name_to_section_map: dict[str, list[str]] = {}
        self._language_code = language_code
        self._questionnaire_json = questionnaire_json
        # Order matters here; see the docstring above.
        self._sections_by_id = self._get_sections_by_id()
        self._groups_by_id = self._get_groups_by_id()
        self._blocks_by_id = self._get_blocks_by_id()
        self._questions_by_id = self._get_questions_by_id()
        self._answers_by_id = self._get_answers_by_id()
    @cached_property
    def language_code(self) -> str:
        # Language the schema was loaded for (e.g. "en").
        return self._language_code

    @cached_property
    def error_messages(self) -> Any:
        # Immutable view of the schema's validation error messages.
        return self.serialize(self._get_error_messages())

    @cached_property
    def json(self) -> Any:
        # The whole questionnaire definition, frozen into immutable structures.
        return self.serialize(self._questionnaire_json)
@cached_property
def survey(self) -> Optional[str]:
survey: Optional[str] = self.json.get("survey")
return survey
@cached_property
def form_type(self) -> Optional[str]:
form_type: Optional[str] = self.json.get("form_type")
return form_type
@cached_property
def region_code(self) -> Optional[str]:
region_code: Optional[str] = self.json.get("region_code")
return region_code
@cached_property
def parent_id_map(self) -> Any:
return self.serialize(self._parent_id_map)
@classmethod
def serialize(cls, data: Any) -> Any:
if isinstance(data, abc.Hashable):
return data
if isinstance(data, list):
return tuple((cls.serialize(item) for item in data))
if isinstance(data, dict):
key_value_tuples = {k: cls.serialize(v) for k, v in data.items()}
return ImmutableDict(key_value_tuples)
@classmethod
def get_mutable_deepcopy(cls, data: Any) -> Any:
if isinstance(data, tuple):
return list((cls.get_mutable_deepcopy(item) for item in data))
if isinstance(data, ImmutableDict):
key_value_tuples = {k: cls.get_mutable_deepcopy(v) for k, v in data.items()}
return dict(key_value_tuples)
return deepcopy(data)
    def _get_sections_by_id(self) -> dict[str, ImmutableDict]:
        # Index sections by id; a missing "sections" key yields an empty map.
        return {section["id"]: section for section in self.json.get("sections", [])}
    def _get_groups_by_id(self) -> dict[str, ImmutableDict]:
        """Index all groups by id and record each group's parent section."""
        groups_by_id: dict[str, ImmutableDict] = {}
        for section in self._sections_by_id.values():
            for group in section["groups"]:
                group_id = group["id"]
                groups_by_id[group_id] = group
                self._parent_id_map[group_id] = section["id"]
        return groups_by_id
    def _get_blocks_by_id(self) -> dict[str, ImmutableDict]:
        """Index all blocks by id, including nested collector sub-blocks."""
        blocks: dict[str, ImmutableDict] = {}
        for group in self._groups_by_id.values():
            for block in group["blocks"]:
                block_id = block["id"]
                self._parent_id_map[block_id] = group["id"]
                blocks[block_id] = block
                # Collector blocks embed their add/edit/remove sub-blocks;
                # index those too, with the collector as their parent.
                if block["type"] in (
                    "ListCollector",
                    "PrimaryPersonListCollector",
                    "RelationshipCollector",
                ):
                    for nested_block_name in [
                        "add_block",
                        "edit_block",
                        "remove_block",
                        "add_or_edit_block",
                        "unrelated_block",
                    ]:
                        if block.get(nested_block_name):
                            nested_block = block[nested_block_name]
                            nested_block_id = nested_block["id"]
                            blocks[nested_block_id] = nested_block
                            self._parent_id_map[nested_block_id] = block_id
        return blocks
    def _get_questions_by_id(self) -> dict[str, list[ImmutableDict]]:
        """Index questions by id; one id can map to several question variants."""
        questions_by_id = defaultdict(list)
        for block in self._blocks_by_id.values():
            questions = self.get_all_questions_for_block(block)
            for question in questions:
                question_id = question["id"]
                questions_by_id[question_id].append(question)
                self._parent_id_map[question_id] = block["id"]
        return questions_by_id
    def _get_answers_by_id(self) -> dict[str, list[ImmutableDict]]:
        """Index answers (and option detail answers) by id, one entry per variant."""
        answers_by_id = defaultdict(list)
        for question_set in self._questions_by_id.values():
            for question in question_set:
                question_id = question["id"]
                for answer in question["answers"]:
                    answer_id = answer["id"]
                    self._parent_id_map[answer_id] = question_id
                    answers_by_id[answer["id"]].append(answer)
                    # detail answers attached to options are also indexed,
                    # parented by the same question
                    for option in answer.get("options", []):
                        detail_answer = option.get("detail_answer")
                        if detail_answer:
                            detail_answer_id = detail_answer["id"]
                            answers_by_id[detail_answer_id].append(detail_answer)
                            self._parent_id_map[detail_answer_id] = question_id
        return answers_by_id
@cached_property
def _flow(self) -> ImmutableDict[str, Any]:
questionnaire_flow: ImmutableDict = self.json["questionnaire_flow"]
return questionnaire_flow
@cached_property
def flow_options(self) -> ImmutableDict[str, Any]:
options: ImmutableDict[str, Any] = self._flow["options"]
return options
@cached_property
def is_flow_hub(self) -> bool:
return bool(self._flow["type"] == "Hub")
@cached_property
def is_flow_linear(self) -> bool:
return bool(self._flow["type"] == "Linear")
@cached_property
def is_view_submitted_response_enabled(self) -> bool:
schema: Mapping = self.get_post_submission()
is_enabled: bool = schema.get("view_response", False)
return is_enabled
def get_section_ids_required_for_hub(self) -> list[str]:
return self.flow_options.get("required_completed_sections", [])
def get_summary_options(self) -> ImmutableDict[str, Any]:
return self.flow_options.get("summary", {})
def get_sections(self) -> Iterable[ImmutableDict]:
return self._sections_by_id.values()
def get_section(self, section_id: str) -> Optional[ImmutableDict]:
return self._sections_by_id.get(section_id)
def get_section_ids_dependent_on_list(self, list_name: str) -> list[str]:
try:
return self._list_name_to_section_map[list_name]
except KeyError:
section_ids = self._section_ids_associated_to_list_name(list_name)
self._list_name_to_section_map[list_name] = section_ids
return section_ids
def get_submission(self) -> ImmutableDict:
schema: ImmutableDict = self.json.get("submission", ImmutableDict({}))
return schema
def get_post_submission(self) -> ImmutableDict:
schema: ImmutableDict = self.json.get("post_submission", ImmutableDict({}))
return schema
    def _section_ids_associated_to_list_name(self, list_name: str) -> list[str]:
        """Collect ids of sections whose "when" rules reference *list_name*."""
        section_ids: list[str] = []
        for section in self.get_sections():
            # variants carry their own rules; don't descend into them here
            ignore_keys = ["question_variants", "content_variants"]
            when_rules = get_values_for_key(section, "when", ignore_keys)
            if any(
                rule.get("list") == list_name
                for when_rule in when_rules
                for rule in when_rule
            ):
                section_ids.append(section["id"])
        return section_ids
@staticmethod
def get_blocks_for_section(
section: Mapping,
) -> Generator[ImmutableDict, None, None]:
return (block for group in section["groups"] for block in group["blocks"])
@classmethod
def get_driving_question_for_list(
cls, section: Mapping, list_name: str
) -> Optional[ImmutableDict]:
for block in cls.get_blocks_for_section(section):
if (
block["type"] == "ListCollectorDrivingQuestion"
and list_name == block["for_list"]
):
return block
def get_remove_block_id_for_list(self, list_name: str) -> Optional[str]:
for block in self.get_blocks():
if block["type"] == "ListCollector" and block["for_list"] == list_name:
remove_block_id: str = block["remove_block"]["id"]
return remove_block_id
def get_individual_response_list(self) -> Optional[str]:
list_name: Optional[str] = self.json.get("individual_response", {}).get(
"for_list"
)
return list_name
def get_individual_response_show_on_hub(self) -> bool:
show_on_hub: bool = self.json.get("individual_response", {}).get(
"show_on_hub", True
)
return show_on_hub
def get_individual_response_individual_section_id(self) -> Optional[str]:
section_id: Optional[str] = self._questionnaire_json.get(
"individual_response", {}
).get("individual_section_id")
return section_id
def get_title_for_section(self, section_id: str) -> Optional[str]:
if section := self.get_section(section_id):
return section.get("title")
def get_show_on_hub_for_section(self, section_id: str) -> Optional[bool]:
if section := self.get_section(section_id):
return section.get("show_on_hub", True)
def get_summary_for_section(self, section_id: str) -> Optional[ImmutableDict]:
if section := self.get_section(section_id):
return section.get("summary")
def get_summary_title_for_section(self, section_id: str) -> Optional[str]:
if summary := self.get_summary_for_section(section_id):
return summary.get("title")
def show_summary_on_completion_for_section(self, section_id: str) -> Optional[bool]:
if summary := self.get_summary_for_section(section_id):
return summary.get("show_on_completion", False)
def get_repeat_for_section(self, section_id: str) -> Optional[ImmutableDict]:
if section := self.get_section(section_id):
return section.get("repeat")
def get_repeating_list_for_section(self, section_id: str) -> Optional[str]:
if repeat := self.get_repeat_for_section(section_id):
return repeat.get("for_list")
def get_repeating_title_for_section(
self, section_id: str
) -> Optional[ImmutableDict]:
if repeat := self.get_repeat_for_section(section_id):
return repeat.get("title")
def get_repeating_page_title_for_section(self, section_id: str) -> Optional[str]:
if repeat := self.get_repeat_for_section(section_id):
return repeat.get("page_title")
def get_custom_page_title_for_section(self, section_id: str) -> Optional[str]:
if summary := self.get_summary_for_section(section_id):
return summary.get("page_title")
    def get_section_for_block_id(self, block_id: str) -> Optional[ImmutableDict]:
        """Resolve the section a block belongs to via the parent-id map."""
        block = self.get_block(block_id)
        if (
            block
            and block.get("type") in LIST_COLLECTOR_CHILDREN + RELATIONSHIP_CHILDREN
        ):
            # nested sub-blocks have an extra collector level between them and the group
            section_id = self._get_parent_section_id_for_block(block_id)
        else:
            group_id = self._parent_id_map[block_id]
            section_id = self._parent_id_map[group_id]
        return self.get_section(section_id)
    def get_section_id_for_block_id(self, block_id: str) -> Optional[str]:
        """Section id for *block_id*, or None when no section is found."""
        if section := self.get_section_for_block_id(block_id):
            section_id: str = section["id"]
            return section_id
def get_groups(self) -> Iterable[ImmutableDict]:
return self._groups_by_id.values()
def get_group(self, group_id: str) -> Optional[ImmutableDict]:
return self._groups_by_id.get(group_id)
def get_group_for_block_id(self, block_id: str) -> Optional[ImmutableDict]:
return self._group_for_block(block_id)
def get_first_block_id_for_group(self, group_id: str) -> Optional[str]:
group = self.get_group(group_id)
if group:
block_id: str = group["blocks"][0]["id"]
return block_id
def get_first_block_id_for_section(self, section_id: str) -> Optional[str]:
section = self.get_section(section_id)
if section:
group_id: str = section["groups"][0]["id"]
return self.get_first_block_id_for_group(group_id)
def get_blocks(self) -> Iterable[ImmutableDict]:
return self._blocks_by_id.values()
def get_block(self, block_id: str) -> Optional[ImmutableDict]:
return self._blocks_by_id.get(block_id)
def is_block_valid(self, block_id: str) -> bool:
return bool(self.get_block(block_id))
def get_block_for_answer_id(self, answer_id: str) -> Optional[ImmutableDict]:
return self._block_for_answer(answer_id)
def is_block_in_repeating_section(self, block_id: str) -> Optional[bool]:
if section_id := self.get_section_id_for_block_id(block_id=block_id):
return bool(self.get_repeating_list_for_section(section_id))
def is_answer_in_list_collector_block(self, answer_id: str) -> Optional[bool]:
if block := self.get_block_for_answer_id(answer_id):
return self.is_list_block_type(block["type"])
def is_answer_in_repeating_section(self, answer_id: str) -> Optional[bool]:
if block := self.get_block_for_answer_id(answer_id):
return self.is_block_in_repeating_section(block_id=block["id"])
def is_repeating_answer(
self,
answer_id: str,
) -> bool:
return bool(
self.is_answer_in_list_collector_block(answer_id)
or self.is_answer_in_repeating_section(answer_id)
)
    def get_answers_by_answer_id(self, answer_id: str) -> list[ImmutableDict]:
        """Return answers matching answer id, including all matching answers inside
        variants
        """
        answers: list[ImmutableDict] = self._answers_by_id.get(answer_id, [])
        return answers
    def get_default_answer(self, answer_id: str) -> Optional[Answer]:
        """Build an Answer from the schema's default value, or None when absent."""
        if answer_schemas := self.get_answers_by_answer_id(answer_id):
            first_answer_schema = answer_schemas[0]
            try:
                # KeyError here means the answer schema defines no "default"
                return Answer(first_answer_schema["id"], first_answer_schema["default"])
            except (IndexError, KeyError, TypeError):
                return None
        return None
def get_add_block_for_list_collector(
self, list_collector_id: str
) -> Optional[ImmutableDict]:
add_block_map = {
"ListCollector": "add_block",
"PrimaryPersonListCollector": "add_or_edit_block",
}
if list_collector := self.get_block(list_collector_id):
add_block: ImmutableDict = list_collector[
add_block_map[list_collector["type"]]
]
return add_block
def get_answer_ids_for_list_items(
self, list_collector_id: str
) -> Optional[list[str]]:
"""
Get answer ids used to add items to a list.
"""
if add_block := self.get_add_block_for_list_collector(list_collector_id):
return self.get_answer_ids_for_block(add_block["id"])
def get_questions(self, question_id: str) -> Optional[list[ImmutableDict]]:
"""Return a list of questions matching some question id
This includes all questions inside variants
"""
return self._questions_by_id.get(question_id)
@staticmethod
def get_list_collectors_for_list(
section: Mapping, for_list: str, primary: bool = False
) -> Generator[ImmutableDict, None, None]:
collector_type = "PrimaryPersonListCollector" if primary else "ListCollector"
return (
block
for block in QuestionnaireSchema.get_blocks_for_section(section)
if block["type"] == collector_type and block["for_list"] == for_list
)
@staticmethod
def get_list_collector_for_list(
section: Mapping, for_list: str, primary: bool = False
) -> Optional[ImmutableDict]:
try:
return next(
QuestionnaireSchema.get_list_collectors_for_list(
section, for_list, primary
)
)
except StopIteration:
return None
@classmethod
def get_answer_ids_for_question(cls, question: Mapping) -> list[str]:
answer_ids: list[str] = []
for answer in question.get("answers", []):
answer_ids.append(answer["id"])
for option in answer.get("options", []):
if "detail_answer" in option:
answer_ids.append(option["detail_answer"]["id"])
return answer_ids
    def get_first_answer_id_for_block(self, block_id: str) -> str:
        # NOTE(review): raises IndexError when the block has no answers -
        # callers are expected to pass blocks containing at least one answer.
        answer_ids = self.get_answer_ids_for_block(block_id)
        return answer_ids[0]
    def get_answer_ids_for_block(self, block_id: str) -> list[str]:
        """Answer ids for a block; [] for unknown blocks or blocks without questions."""
        block = self.get_block(block_id)
        if block:
            if block.get("question"):
                return self.get_answer_ids_for_question(block["question"])
            if block.get("question_variants"):
                # only the first variant is inspected - assumes variants share
                # the same answer ids (TODO confirm against schema validation)
                return self.get_answer_ids_for_question(
                    block["question_variants"][0]["question"]
                )
        return []
def get_relationship_collectors(self) -> list[ImmutableDict]:
return [
block
for block in self.get_blocks()
if block["type"] == "RelationshipCollector"
]
def get_relationship_collectors_by_list_name(
self, list_name: str
) -> Optional[list[ImmutableDict]]:
relationship_collectors = self.get_relationship_collectors()
if relationship_collectors:
return [
block
for block in relationship_collectors
if block["for_list"] == list_name
]
def get_unrelated_block_no_answer_values(
self, unrelated_answer_id: str
) -> Optional[list[str]]:
if unrelated_answers := self.get_answers_by_answer_id(unrelated_answer_id):
return [
option["value"]
for unrelated_answer in unrelated_answers
for option in unrelated_answer["options"]
if option.get("action", {}).get("type") == "AddUnrelatedRelationships"
]
@staticmethod
def get_single_string_value(schema_object: Union[Mapping, str]) -> str:
"""
Resolves an identifying string value for the schema_object. If text_plural the `other` form is returned.
:return: string value
"""
if isinstance(schema_object, str):
return schema_object
if "text_plural" in schema_object:
plural_placeholder_string: str = schema_object["text_plural"]["forms"][
"other"
]
return plural_placeholder_string
placeholder_string: str = schema_object["text"]
return placeholder_string
@staticmethod
def get_all_questions_for_block(block: Mapping) -> list[ImmutableDict]:
all_questions: list[ImmutableDict] = []
if block:
if block.get("question"):
all_questions.append(block["question"])
elif block.get("question_variants"):
for variant in block["question_variants"]:
all_questions.append(variant["question"])
return all_questions
return []
@staticmethod
def is_primary_person_block_type(block_type: str) -> bool:
primary_person_blocks = [
"PrimaryPersonListCollector",
"PrimaryPersonListAddOrEditQuestion",
]
return block_type in primary_person_blocks
@staticmethod
def is_list_block_type(block_type: str) -> bool:
list_blocks = ["ListCollector"] + LIST_COLLECTOR_CHILDREN
return block_type in list_blocks
@staticmethod
def is_question_block_type(block_type: str) -> bool:
return block_type in [
"Question",
"ListCollectorDrivingQuestion",
"ConfirmationQuestion",
]
@staticmethod
def has_address_lookup_answer(question: Mapping) -> bool:
return any(
answer
for answer in question["answers"]
if answer["type"] == "Address" and "lookup_options" in answer
)
    def _get_parent_section_id_for_block(self, block_id: str) -> str:
        # block -> collector block -> group -> section
        parent_block_id = self._parent_id_map[block_id]
        group_id = self._parent_id_map[parent_block_id]
        section_id = self._parent_id_map[group_id]
        return section_id
    def _block_for_answer(self, answer_id: str) -> Optional[ImmutableDict]:
        """Block owning *answer_id*; answers of ListCollector children resolve to the collector."""
        question_id = self._parent_id_map[answer_id]
        block_id = self._parent_id_map[question_id]
        parent_block_id = self._parent_id_map[block_id]
        parent_block = self.get_block(parent_block_id)
        if parent_block and parent_block["type"] == "ListCollector":
            return parent_block
        return self.get_block(block_id)
    def _group_for_block(self, block_id: str) -> Optional[ImmutableDict]:
        """Group owning *block_id*, skipping the collector level for child blocks."""
        block = self.get_block(block_id)
        parent_id = self._parent_id_map[block_id]
        if block and block["type"] in LIST_COLLECTOR_CHILDREN:
            # parent_id is the collector block; go one more level up to the group
            group_id = self._parent_id_map[parent_id]
            return self.get_group(group_id)
        return self.get_group(parent_id)
    def _get_error_messages(self) -> dict:
        # Force translation of global error messages (stored as LazyString's) into the language of the schema.
        with force_locale(self.language_code):
            messages = {k: str(v) for k, v in error_messages.items()}
        # schema-defined messages override the translated defaults
        if "messages" in self.json:
            messages.update(self.json["messages"])
        return messages
| 37.589869 | 112 | 0.637166 |
ace3cfdd03fe63e1286b34af127d1c2ff89157c4 | 7,400 | py | Python | apps/bot/commands/TrustedCommands/Media.py | Xoma163/petrovich | 35acaba60e183fffbf8d7b955627953ad2c5028d | [
"MIT"
] | 4 | 2020-12-25T16:17:53.000Z | 2022-01-19T15:06:19.000Z | apps/bot/commands/TrustedCommands/Media.py | Xoma163/petrovich | 35acaba60e183fffbf8d7b955627953ad2c5028d | [
"MIT"
] | 294 | 2020-07-17T15:45:21.000Z | 2022-03-27T10:24:01.000Z | apps/bot/commands/TrustedCommands/Media.py | Xoma163/petrovich | 35acaba60e183fffbf8d7b955627953ad2c5028d | [
"MIT"
] | 3 | 2020-12-30T17:14:24.000Z | 2021-12-19T09:14:22.000Z | import json
from urllib.parse import urlparse
import requests
import youtube_dl
from bs4 import BeautifulSoup
from apps.bot.APIs.RedditVideoDownloader import RedditVideoSaver
from apps.bot.classes.Command import Command
from apps.bot.classes.consts.Consts import Platform
from apps.bot.classes.consts.Exceptions import PWarning, PError, PSkip
from apps.bot.utils.utils import get_urls_from_text
# Hostnames handled by each downloader.
YOUTUBE_URLS = ('www.youtube.com', 'youtube.com', "www.youtu.be", "youtu.be")
REDDIT_URLS = ("www.reddit.com",)
TIKTOK_URLS = ("www.tiktok.com", 'vm.tiktok.com', 'm.tiktok.com')
INSTAGRAM_URLS = ('www.instagram.com', 'instagram.com')
# Flat tuple of every supported hostname.
MEDIA_URLS = tuple(list(YOUTUBE_URLS) + list(REDDIT_URLS) + list(TIKTOK_URLS) + list(INSTAGRAM_URLS))
class Media(Command):
    # Command metadata shown to users (user-facing strings, kept in Russian).
    name = "медиа"
    help_text = "скачивает видео из Reddit/TikTok/YouTube/Instagram и присылает его"
    help_texts = [
        "(ссылка на видео) - скачивает видео из Reddit/TikTok/YouTube/Instagram и присылает его"
    ]
    platforms = [Platform.TG]
    def __init__(self):
        super().__init__()
        # Maps a tuple of hostnames to the handler that downloads from them.
        self.MEDIA_TRANSLATOR = {
            YOUTUBE_URLS: self.get_youtube_video,
            TIKTOK_URLS: self.get_tiktok_video,
            REDDIT_URLS: self.get_reddit_attachment,
            INSTAGRAM_URLS: self.get_instagram_attachment,
        }
    def start(self):
        """Entry point: resolve the source text, download the media and build the reply."""
        if self.event.message.command in self.full_names:
            # explicit command: take the link from args or a forwarded message
            if self.event.message.args:
                source = self.event.message.args_case[0]
            elif self.event.fwd:
                source = self.event.fwd[0].message.raw
            else:
                raise PWarning("Для работы команды требуются аргументы или пересылаемые сообщения")
            has_command_name = True
        else:
            # implicit trigger: scan the whole message text for links
            source = self.event.message.raw
            has_command_name = False
        method, chosen_url = self.get_method_and_chosen_url(source)
        try:
            attachments, title = method(chosen_url)
        except PWarning as e:
            # If the command was invoked explicitly or sent in a private chat,
            # surface the warning; otherwise silently skip the message.
            if has_command_name or self.event.is_from_pm:
                raise e
            else:
                raise PSkip()
        if has_command_name or self.event.is_from_pm:
            return {'attachments': attachments}
        else:
            # implicit trigger in a group: repost the media with attribution
            self.bot.delete_message(self.event.peer_id, self.event.message.id)
            chosen_url_pos = source.find(chosen_url)
            extra_text = source[:chosen_url_pos].strip() + "\n" + source[chosen_url_pos + len(chosen_url):].strip()
            extra_text = extra_text.strip()
            text = ""
            if title:
                text = f"{title}\n"
            text += f"От пользователя {self.event.sender}"
            if self.event.platform == Platform.TG:
                text += f"\n[Сурс]({chosen_url})"
            else:
                text += f"\n{chosen_url}"
            # Workaround so that videos shared from the Reddit mobile app
            # don't end up with the title duplicated in the message text.
            if extra_text and extra_text != title:
                text += f"\n{extra_text}"
            return {'text': text, 'attachments': attachments}
def get_method_and_chosen_url(self, source):
method = None
urls = get_urls_from_text(source)
for url in urls:
hostname = urlparse(url).hostname
if not hostname:
raise PWarning("Не нашёл ссылки")
for k in self.MEDIA_TRANSLATOR:
if hostname in k:
return self.MEDIA_TRANSLATOR[k], url
if not method:
raise PWarning("Не youtube/tiktok/reddit/instagram ссылка")
    def get_youtube_video(self, url):
        """Download a YouTube video (max 60s) and return ([attachment], title)."""
        ydl_params = {
            'outtmpl': '%(id)s%(ext)s',
            'logger': NothingLogger()
        }
        ydl = youtube_dl.YoutubeDL(ydl_params)
        ydl.add_default_info_extractors()
        try:
            video_info = ydl.extract_info(url, download=False)
        except youtube_dl.utils.DownloadError:
            raise PWarning("Не смог найти видео по этой ссылке")
        video_urls = []
        if video_info['duration'] > 60:
            raise PWarning("Нельзя грузить видосы > 60 секунд с ютуба")
        if 'formats' in video_info:
            for _format in video_info['formats']:
                # keep mp4 formats that carry an audio stream ('asr' set)
                if _format['ext'] == 'mp4' and _format['asr']:
                    video_urls.append(_format)
        if len(video_urls) == 0:
            raise PWarning("Чёт проблемки, напишите разрабу и пришли ссылку на видео")
        # NOTE(review): sorts by 'format_note' ascending and takes the first -
        # confirm this actually picks the intended quality.
        max_quality_video = sorted(video_urls, key=lambda x: x['format_note'])[0]
        url = max_quality_video['url']
        video_content = requests.get(url).content
        attachments = [self.bot.upload_video(video_content, peer_id=self.event.peer_id)]
        return attachments, video_info['title']
    def get_tiktok_video(self, url):
        """Download a TikTok video and return ([attachment], description)."""
        headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        # TikTok only serves the page content intermittently, hence this
        # workaround of retrying with a freshly created session each time.
        tries = 10
        s = requests.Session()
        video_data = None
        for _ in range(tries):
            r = s.get(url, headers=headers)
            bs4 = BeautifulSoup(r.content, 'html.parser')
            try:
                video_data = json.loads(bs4.find(id='__NEXT_DATA__').contents[0])
                break
            except (AttributeError, ConnectionError):
                s = requests.Session()
        if not video_data:
            raise PError("Ошибка загрузки видео с tiktok")
        item_struct = video_data['props']['pageProps']['itemInfo']['itemStruct']
        video_url = item_struct['video']['downloadAddr']
        title = item_struct['desc']
        # the download URL requires the canonical page as Referer
        headers['Referer'] = video_data['props']['pageProps']['seoProps']['metaParams']['canonicalHref']
        r = s.get(video_url, headers=headers)
        s.close()
        attachments = [self.bot.upload_video(r.content, peer_id=self.event.peer_id)]
        return attachments, title
    def get_reddit_attachment(self, url):
        """Download media from a Reddit post and return ([attachment], post title)."""
        rvs = RedditVideoSaver()
        video = rvs.get_video_from_post(url)
        attachments = [self.bot.upload_video(video, peer_id=self.event.peer_id)]
        return attachments, rvs.title
    def get_instagram_attachment(self, url):
        """Download a photo or video from an Instagram post; returns ([attachment], "")."""
        r = requests.get(url)
        bs4 = BeautifulSoup(r.content, 'html.parser')
        try:
            # the "medium" meta tag states whether the post is an image or a video
            content_type = bs4.find('meta', attrs={'name': 'medium'}).attrs['content']
        except Exception:
            raise PWarning("Ссылка на инстаграмм не является видео/фото")
        if content_type == 'image':
            photo_url = bs4.find('meta', attrs={'property': 'og:image'}).attrs['content']
            return self.bot.upload_photos([photo_url], peer_id=self.event.peer_id), ""
        elif content_type == 'video':
            video_url = bs4.find('meta', attrs={'property': 'og:video'}).attrs['content']
            return [self.bot.upload_video(video_url, peer_id=self.event.peer_id)], ""
        else:
            raise PWarning("Ссылка на инстаграмм не является видео/фото")
class NothingLogger:
    """Logger sink for youtube_dl: swallows debug/warning output, prints errors."""

    @staticmethod
    def debug(msg):
        """Discard debug output."""

    @staticmethod
    def warning(msg):
        """Discard warnings."""

    @staticmethod
    def error(msg):
        """Print errors to stdout."""
        print(msg)
| 38.341969 | 143 | 0.614595 |
ace3d1392904670ff45b60ed669be251585a5764 | 3,370 | py | Python | notes/settings.py | abdulshak1999/notes | 8aad053dd7be4a982a328fa5291cf0cbbb1c1419 | [
"Unlicense"
] | 2 | 2021-01-19T13:27:12.000Z | 2021-03-20T14:14:19.000Z | notes/settings.py | abdulshak1999/notes | 8aad053dd7be4a982a328fa5291cf0cbbb1c1419 | [
"Unlicense"
] | null | null | null | notes/settings.py | abdulshak1999/notes | 8aad053dd7be4a982a328fa5291cf0cbbb1c1419 | [
"Unlicense"
] | 1 | 2021-01-18T09:31:22.000Z | 2021-01-18T09:31:22.000Z | from pathlib import Path
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and load it
# from an environment variable before deploying.
SECRET_KEY = '+)nh!^gk-=ot-y9_xh#zx(v=l2m7-6tzh!$^iaa07kak%4y+vo'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is True while a production host is listed in
# ALLOWED_HOSTS - confirm this is not deployed as-is.
DEBUG = True
ALLOWED_HOSTS = ['addmynote.herokuapp.com', '127.0.0.1']
# Custom user model from the "account" app.
AUTH_USER_MODEL = 'account.Account'
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'rest_framework_swagger',
    'home',
    'account',
]
# DRF: token authentication by default; permissions are currently left to
# each view (the global IsAuthenticated default is commented out below).
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
    ],
    'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'
    # 'DEFAULT_PERMISSION_CLASSES': [
    #     'rest_framework.permissions.IsAuthenticated',
    # ]
}
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'notes.urls'
# Project-level templates directory plus per-app template dirs.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR/ 'templates', ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'notes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
        # Django expects the test database name nested under a "TEST" dict;
        # the previous flat 'TEST NAME' key was silently ignored.
        'TEST': {
            'NAME': BASE_DIR / 'testdb.sqlite3',
        },
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
# Django's four default validators, unchanged.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# NOTE(review): import placed mid-file; conventionally this belongs at the
# top of the module with the other imports.
import os
STATIC_URL = '/static/'
STATICFILES_DIRS = [BASE_DIR / 'static', ]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_URL = '/accounts/signin/'
# Let django-heroku override database/static settings from the Heroku environment.
django_heroku.settings(locals())
| 25.923077 | 91 | 0.688131 |
ace3d194c662493c8e817c9dcf6f11d7d3f47f88 | 8,798 | py | Python | ament_copyright/ament_copyright/parser.py | mjbogusz/ament_lint | 1f5c6bba4c5180aa8d2b593c6f3aa8ee1309d36a | [
"Apache-2.0"
] | 23 | 2015-07-08T05:42:24.000Z | 2022-03-14T02:13:01.000Z | ament_copyright/ament_copyright/parser.py | mjbogusz/ament_lint | 1f5c6bba4c5180aa8d2b593c6f3aa8ee1309d36a | [
"Apache-2.0"
] | 292 | 2015-03-06T20:11:45.000Z | 2022-03-31T22:30:41.000Z | ament_copyright/ament_copyright/parser.py | mjbogusz/ament_lint | 1f5c6bba4c5180aa8d2b593c6f3aa8ee1309d36a | [
"Apache-2.0"
] | 71 | 2016-05-24T01:24:54.000Z | 2022-03-23T07:42:41.000Z | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from ament_copyright import ALL_FILETYPES
from ament_copyright import CONTRIBUTING_FILETYPE
from ament_copyright import get_copyright_names
from ament_copyright import get_licenses
from ament_copyright import LICENSE_FILETYPE
from ament_copyright import SOURCE_FILETYPE
from ament_copyright import UNKNOWN_IDENTIFIER
class CopyrightDescriptor:
    """Holder name plus the year (or year range) of a copyright notice."""

    def __init__(self, name, year_range):
        self.name = name
        self.year_range = year_range

    def __str__(self):
        """Render as 'Name (years)', omitting the years part when empty."""
        if not self.year_range:
            return self.name
        return '%s (%s)' % (self.name, self.year_range)
class FileDescriptor:
    """Base descriptor for a file checked by ament_copyright."""
    def __init__(self, filetype, path):
        self.filetype = filetype
        self.path = path
        self.exists = os.path.exists(path)
        self.content = None
        self.license_identifier = UNKNOWN_IDENTIFIER
    def read(self):
        """Load the file content as UTF-8; no-op when the file does not exist."""
        if not self.exists:
            return
        with open(self.path, 'r', encoding='utf-8') as h:
            self.content = h.read()
    def parse(self):
        raise NotImplementedError()
    def identify_license(self, content, license_part):
        """Set self.license_identifier when *content* matches a known license template.

        *license_part* selects which template collection of each license to
        try (e.g. 'file_headers', 'license_files', 'contributing_files').
        """
        if content is None:
            return
        for name, license_ in get_licenses().items():
            templates = getattr(license_, license_part)
            for template in templates:
                formatted_template = remove_formatting(template)
                last_index = -1
                # the copyright holder is a wildcard: every literal section of
                # the template must appear in the content, in order
                for license_section in formatted_template.split('{copyright_holder}'):
                    # OK, now look for each section of the license in the incoming
                    # content.
                    index = remove_formatting(content).find(license_section.strip())
                    if index == -1 or index <= last_index:
                        # Some part of the license is not in the content, or the license
                        # is rearranged, this license doesn't match.
                        break
                    last_index = index
                else:
                    # We found the license, so set it
                    # NOTE(review): this break only exits the template loop; the
                    # outer license loop continues, so a later license can
                    # overwrite the match - confirm whether that is intended.
                    self.license_identifier = name
                    break
class SourceDescriptor(FileDescriptor):
    """Descriptor for a source file; extracts copyright notices and the license header."""
    def __init__(self, path):
        super(SourceDescriptor, self).__init__(SOURCE_FILETYPE, path)
        self.copyrights = []
        self.copyright_identifiers = []
    def identify_copyright(self):
        """Map each found copyright holder name to a known identifier (or UNKNOWN)."""
        known_copyrights = get_copyright_names()
        for c in self.copyrights:
            found_name = c.name
            for identifier, name in known_copyrights.items():
                if name == found_name:
                    self.copyright_identifiers.append(identifier)
                    break
            else:
                self.copyright_identifiers.append(UNKNOWN_IDENTIFIER)
    def parse(self):
        """Extract copyrights and the license from the file's first comment block."""
        self.read()
        if not self.content:
            return
        # skip over coding and shebang lines
        index = scan_past_coding_and_shebang_lines(self.content)
        index = scan_past_empty_lines(self.content, index)
        # get first comment block without leading comment tokens
        block, _ = get_comment_block(self.content, index)
        if not block:
            return
        copyrights, remaining_block = search_copyright_information(block)
        if not copyrights:
            return None
        self.copyrights = copyrights
        self.identify_copyright()
        # normalize the notice so templates with a {copyright} placeholder match
        content = '{copyright}' + remaining_block
        self.identify_license(content, 'file_headers')
class ContributingDescriptor(FileDescriptor):
    """Descriptor for a CONTRIBUTING file; detects the declared license."""

    def __init__(self, path):
        super().__init__(CONTRIBUTING_FILETYPE, path)

    def parse(self):
        """Read the file and try to identify the license from its content."""
        self.read()
        if self.content:
            self.identify_license(self.content, 'contributing_files')


class LicenseDescriptor(FileDescriptor):
    """Descriptor for a LICENSE file; detects which license the text is."""

    def __init__(self, path):
        super().__init__(LICENSE_FILETYPE, path)

    def parse(self):
        """Read the file and try to identify the license from its content."""
        self.read()
        if self.content:
            self.identify_license(self.content, 'license_files')
def parse_file(path):
    """Create, parse and return the descriptor matching *path*'s file type.

    Returns None when the file type has no associated descriptor class.
    """
    descriptor_classes = {
        SOURCE_FILETYPE: SourceDescriptor,
        CONTRIBUTING_FILETYPE: ContributingDescriptor,
        LICENSE_FILETYPE: LicenseDescriptor,
    }
    descriptor_class = descriptor_classes.get(determine_filetype(path))
    if descriptor_class is None:
        return None
    descriptor = descriptor_class(path)
    descriptor.parse()
    return descriptor


def determine_filetype(path):
    """Classify *path* by basename; anything unrecognized is a source file."""
    basename = os.path.basename(path)
    matches = (
        filetype for filetype, filename in ALL_FILETYPES.items()
        if filename == basename
    )
    return next(matches, SOURCE_FILETYPE)
def search_copyright_information(content):
    """Extract all copyright notices from *content*.

    Returns (list of CopyrightDescriptor, content remaining after the last
    matched holder name).
    """
    # regex for matching years or year ranges (yyyy-yyyy) separated by colons
    year = r'\d{4}'
    year_range = '%s-%s' % (year, year)
    year_or_year_range = '(?:%s|%s)' % (year, year_range)
    pattern = r'^[^\n\r]?\s*(?:\\copyright\s*)?' \
        r'Copyright(?:\s+\(c\))?\s+(%s(?:,\s*%s)*),?\s+([^\n\r]+)$' % \
        (year_or_year_range, year_or_year_range)
    regex = re.compile(pattern, re.DOTALL | re.MULTILINE)
    copyrights = []
    while True:
        match = regex.search(content)
        if not match:
            break
        years_span, name_span = match.span(1), match.span(2)
        years = content[years_span[0]:years_span[1]]
        name = content[name_span[0]:name_span[1]]
        copyrights.append(CopyrightDescriptor(name, years))
        # keep searching after the matched holder name
        content = content[name_span[1]:]
    return copyrights, content
def scan_past_coding_and_shebang_lines(content):
    """Return the index of the first line that is neither a coding nor a shebang comment."""
    index = 0
    while (
        is_comment_line(content, index) and
        (is_coding_line(content, index) or
         is_shebang_line(content, index))
    ):
        index = get_index_of_next_line(content, index)
    return index
def get_index_of_next_line(content, index):
index_n = content.find('\n', index)
index_r = content.find('\r', index)
index_rn = content.find('\r\n', index)
indices = set()
if index_n != -1:
indices.add(index_n)
if index_r != -1:
indices.add(index_r)
if index_rn != -1:
indices.add(index_rn)
if not indices:
return len(content)
index = min(indices)
if index == index_rn:
return index + 2
return index + 1
def is_comment_line(content, index):
# skip over optional BOM
if index == 0 and content[0] == '\ufeff':
index = 1
return content[index] == '#' or content[index:index + 1] == '//'
def is_coding_line(content, index):
end_index = get_index_of_next_line(content, index)
line = content[index:end_index]
return 'coding=' in line or 'coding:' in line
def is_shebang_line(content, index):
# skip over optional BOM
if index == 0 and content[0] == '\ufeff':
index = 1
return content[index:index + 2] == '#!'
def get_comment_block(content, index):
# regex for matching the beginning of the first comment
# check for doxygen comments (///) before regular comments (//)
pattern = '^(#|///|//)'
# also accept BOM if present
if index == 0 and content[0] == '\ufeff':
pattern = pattern[0] + '\ufeff' + pattern[1:]
regex = re.compile(pattern, re.MULTILINE)
match = regex.search(content, index)
if not match:
return None, None
comment_token = match.group(1)
start_index = match.start(1)
end_index = start_index
while True:
end_index = get_index_of_next_line(content, end_index)
if content[end_index:end_index + len(comment_token)] != comment_token:
break
block = content[start_index:end_index]
lines = block.splitlines()
lines = [line[len(comment_token) + 1:] for line in lines]
return '\n'.join(lines), start_index + len(comment_token) + 1
def scan_past_empty_lines(content, index):
while is_empty_line(content, index):
index = get_index_of_next_line(content, index)
return index
def is_empty_line(content, index):
return get_index_of_next_line(content, index) == index + 1
def remove_formatting(text):
return ' '.join(filter(None, text.split()))
| 30.442907 | 88 | 0.635826 |
ace3d1ead9a095094dd6fc73a52a03e32b0c19fb | 395 | py | Python | algorithms/Python/strings/beautiful binary string.py | Kumbong/hackerrank | 36125f3a17c3e0f1fa889495e8ad33b0aa424552 | [
"MIT"
] | 8 | 2019-09-19T19:38:09.000Z | 2022-02-14T13:59:37.000Z | algorithms/Python/strings/beautiful binary string.py | Kumbong/hacker-rank | 36125f3a17c3e0f1fa889495e8ad33b0aa424552 | [
"MIT"
] | null | null | null | algorithms/Python/strings/beautiful binary string.py | Kumbong/hacker-rank | 36125f3a17c3e0f1fa889495e8ad33b0aa424552 | [
"MIT"
] | 7 | 2019-09-23T13:17:27.000Z | 2022-01-27T18:02:16.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the beautifulBinaryString function below.
def beautifulBinaryString(b):
return b.count('010')
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
b = input()
result = beautifulBinaryString(b)
fptr.write(str(result) + '\n')
fptr.close()
| 15.8 | 52 | 0.658228 |
ace3d2441b46b79cbde021aff933a673758d3e96 | 1,914 | py | Python | NPRSynthText/renderpygame.py | rohitsaluja22/OCR-On-the-go | 64ea285178599aa130533e4421a63597a89a4f7c | [
"BSD-3-Clause"
] | 25 | 2019-07-03T12:11:38.000Z | 2022-02-22T15:56:55.000Z | NPRSynthText/renderpygame.py | rohitsaluja22/OCR-On-the-go | 64ea285178599aa130533e4421a63597a89a4f7c | [
"BSD-3-Clause"
] | 4 | 2019-06-14T01:52:11.000Z | 2020-04-03T04:51:30.000Z | StreetBoardSynthText/renderpygame.py | rohitsaluja22/OCR-On-the-go | 64ea285178599aa130533e4421a63597a89a4f7c | [
"BSD-3-Clause"
] | 9 | 2019-06-21T14:58:05.000Z | 2021-05-02T18:43:06.000Z | # Pygame setup and create root window
#https://www.pygame.org/docs/ref/font.html#pygame.font.Font
#https://www.pygame.org/docs/ref/freetype.html#pygame.freetype.Font
# -*- coding: utf-8 -*-
import time
import pygame
import pygame.freetype
pygame.font.init()
pygame.freetype.init()
screen = pygame.display.set_mode((320, 200))
empty = pygame.Surface((320, 200))
'''101Lohit/Lohit14042007.ttf
102Mukti/Mukti1p99PR.ttf
1SolaimanLipi/SolaimanLipi.ttf
2Nikosh/Nikosh.ttf
3AmarBangla/AmarBanglaBold.ttf
3AmarBangla/AmarBangla.ttf
4SutonnyMJ/SutonnyOMJ.ttf
no upto 9
https://www.omicronlab.com/bangla-fonts.html
kalpurush.ttf 47
Siyamrupali.ttf 48
AdorshoLipi_20-07-2007.ttf 46
AponaLohit.ttf 50
Bangla.ttf 37
BenSenHandwriting.ttf 35
BenSen.ttf 35
Nikosh.ttf 35
SolaimanLipi_20-04-07.ttf 40
akaashnormal.ttf 33
Lohit_14-04-2007.ttf 50
mitra.ttf 44
Mukti_1.99_PR.ttf 39
muktinarrow.ttf 50
NikoshBAN.ttf 35
NikoshGrameen.ttf 43
NikoshLightBan.ttf 35
NikoshLight.ttf 35
sagarnormal.ttf 40
'''
DATA_PATH = "/home/rohit/src2/1SceneTextBangla/data/fonts/omicronlab/"
#unistr = "♛"
#font_file = pygame.font.match_font("Shobhika") # Select and Shobhika Sanskrit_2003
#print(font_file)
font_file = DATA_PATH + "sagarnormal.ttf"#"/home/rohit/src2/1SceneTextBangla/data/fonts/9ChitraMJ/ChitraMJ-BoldItalic.ttf"
print(font_file)
font = pygame.freetype.Font(font_file,30)
print(font.get_sized_height())
#font = pygame.font.Font(font_file, 30) # open the font
#writing,dg = font.render(chr(0x0915),(0, 0, 0),(255, 255, 255))
#writing,dg = font.render("द",(0, 0, 0),(255, 255, 255)) # Render text on a surface
rect = font.render_to(empty,(40,40),"আচওয়ালীপঈম",(0, 0, 0),(255, 255, 255))
#writing = font.render()
screen.fill((255, 255, 255)) # Clear the background
screen.blit(empty, (10, 10)) # Blit the text surface on the background
pygame.display.flip() # Refresh the display
input() # Wait for input before quitting
| 30.870968 | 122 | 0.760188 |
ace3d26b9d24aa4f5a9a9793669522ad4646bc68 | 452 | py | Python | tests/p2p/test_identify.py | odesenfans/pyaleph | 1e55437cbfd9db8162e70b930d2124bdf42f6f22 | [
"MIT"
] | null | null | null | tests/p2p/test_identify.py | odesenfans/pyaleph | 1e55437cbfd9db8162e70b930d2124bdf42f6f22 | [
"MIT"
] | null | null | null | tests/p2p/test_identify.py | odesenfans/pyaleph | 1e55437cbfd9db8162e70b930d2124bdf42f6f22 | [
"MIT"
] | null | null | null | from typing import Tuple
import pytest
from p2pclient import Client as P2PClient
@pytest.mark.asyncio
@pytest.mark.parametrize("p2p_clients", [1], indirect=True)
async def test_p2p_client_identify(p2p_clients: Tuple[P2PClient]):
"""Sanity check to make sure that the fixture deploys the P2P daemon and that the client can reach it."""
assert len(p2p_clients) == 1
client = p2p_clients[0]
_peer_id, _maddrs = await client.identify()
| 30.133333 | 109 | 0.754425 |
ace3d27969a1c3b19cbb7ffcf3325fe95993badc | 5,496 | py | Python | stratum-mining-litecoin-stratum-lite/stratum-mining-litecoin-stratum-lite/lib/block_template.py | cryptominingpool204/cryptominingpool204.github.io | 0f8878e6a38aeb370971960b032bab007f69c151 | [
"MIT"
] | null | null | null | stratum-mining-litecoin-stratum-lite/stratum-mining-litecoin-stratum-lite/lib/block_template.py | cryptominingpool204/cryptominingpool204.github.io | 0f8878e6a38aeb370971960b032bab007f69c151 | [
"MIT"
] | null | null | null | stratum-mining-litecoin-stratum-lite/stratum-mining-litecoin-stratum-lite/lib/block_template.py | cryptominingpool204/cryptominingpool204.github.io | 0f8878e6a38aeb370971960b032bab007f69c151 | [
"MIT"
] | null | null | null | import StringIO
import binascii
import struct
import util
import merkletree
import halfnode
from coinbasetx import CoinbaseTransaction
# Remove dependency to settings, coinbase extras should be
# provided from coinbaser
import settings
class BlockTemplate(halfnode.CBlock):
'''Template is used for generating new jobs for clients.
Let's iterate extranonce1, extranonce2, ntime and nonce
to find out valid bitcoin block!'''
coinbase_transaction_class = CoinbaseTransaction
def __init__(self, timestamper, coinbaser, job_id):
super(BlockTemplate, self).__init__()
self.job_id = job_id
self.timestamper = timestamper
self.coinbaser = coinbaser
self.prevhash_bin = '' # reversed binary form of prevhash
self.prevhash_hex = ''
self.timedelta = 0
self.curtime = 0
self.target = 0
#self.coinbase_hex = None
self.merkletree = None
self.broadcast_args = []
# List of 4-tuples (extranonce1, extranonce2, ntime, nonce)
# registers already submitted and checked shares
# There may be registered also invalid shares inside!
self.submits = []
def fill_from_rpc(self, data):
'''Convert getblocktemplate result into BlockTemplate instance'''
#txhashes = [None] + [ binascii.unhexlify(t['hash']) for t in data['transactions'] ]
txhashes = [None] + [ util.ser_uint256(int(t['hash'], 16)) for t in data['transactions'] ]
mt = merkletree.MerkleTree(txhashes)
coinbase = self.coinbase_transaction_class(self.timestamper, self.coinbaser, data['coinbasevalue'],
data['coinbaseaux']['flags'], data['height'], settings.COINBASE_EXTRAS)
self.height = data['height']
self.nVersion = data['version']
self.hashPrevBlock = int(data['previousblockhash'], 16)
self.nBits = int(data['bits'], 16)
self.hashMerkleRoot = 0
self.nTime = 0
self.nNonce = 0
self.vtx = [ coinbase, ]
for tx in data['transactions']:
t = halfnode.CTransaction()
t.deserialize(StringIO.StringIO(binascii.unhexlify(tx['data'])))
self.vtx.append(t)
self.curtime = data['curtime']
self.timedelta = self.curtime - int(self.timestamper.time())
self.merkletree = mt
self.target = util.uint256_from_compact(self.nBits)
# Reversed prevhash
self.prevhash_bin = binascii.unhexlify(util.reverse_hash(data['previousblockhash']))
self.prevhash_hex = "%064x" % self.hashPrevBlock
self.broadcast_args = self.build_broadcast_args()
def register_submit(self, extranonce1, extranonce2, ntime, nonce):
'''Client submitted some solution. Let's register it to
prevent double submissions.'''
t = (extranonce1, extranonce2, ntime, nonce)
if t not in self.submits:
self.submits.append(t)
return True
return False
def build_broadcast_args(self):
'''Build parameters of mining.notify call. All clients
may receive the same params, because they include
their unique extranonce1 into the coinbase, so every
coinbase_hash (and then merkle_root) will be unique as well.'''
job_id = self.job_id
prevhash = binascii.hexlify(self.prevhash_bin)
(coinb1, coinb2) = [ binascii.hexlify(x) for x in self.vtx[0]._serialized ]
merkle_branch = [ binascii.hexlify(x) for x in self.merkletree._steps ]
version = binascii.hexlify(struct.pack(">i", self.nVersion))
nbits = binascii.hexlify(struct.pack(">I", self.nBits))
ntime = binascii.hexlify(struct.pack(">I", self.curtime))
clean_jobs = True
return (job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, clean_jobs)
def serialize_coinbase(self, extranonce1, extranonce2):
'''Serialize coinbase with given extranonce1 and extranonce2
in binary form'''
(part1, part2) = self.vtx[0]._serialized
return part1 + extranonce1 + extranonce2 + part2
def check_ntime(self, ntime):
'''Check for ntime restrictions.'''
if ntime < self.curtime:
return False
if ntime > (self.timestamper.time() + 1000):
# Be strict on ntime into the near future
# may be unnecessary
return False
return True
def serialize_header(self, merkle_root_int, ntime_bin, nonce_bin):
'''Serialize header for calculating block hash'''
r = struct.pack(">i", self.nVersion)
r += self.prevhash_bin
r += util.ser_uint256_be(merkle_root_int)
r += ntime_bin
r += struct.pack(">I", self.nBits)
r += nonce_bin
return r
def finalize(self, merkle_root_int, extranonce1_bin, extranonce2_bin, ntime, nonce):
'''Take all parameters required to compile block candidate.
self.is_valid() should return True then...'''
self.hashMerkleRoot = merkle_root_int
self.nTime = ntime
self.nNonce = nonce
self.vtx[0].set_extranonce(extranonce1_bin + extranonce2_bin)
self.sha256 = None # We changed block parameters, let's reset sha256 cache
| 38.978723 | 107 | 0.62409 |
ace3d27c4f3b03c7b128e586f5b72498df2137ec | 3,119 | py | Python | Chapter04/faster_r-cnn.py | arifmudi/Advanced-Deep-Learning-with-Python | 180cae0ec62857670bfc2a4d566857030adc4905 | [
"MIT"
] | 107 | 2019-12-09T13:49:33.000Z | 2021-08-31T03:23:39.000Z | Chapter04/faster_r-cnn.py | bwantan/Advanced-Deep-Learning-with-Python | 083f63dfc0c5f404143552ea20d560b06ec303f5 | [
"MIT"
] | null | null | null | Chapter04/faster_r-cnn.py | bwantan/Advanced-Deep-Learning-with-Python | 083f63dfc0c5f404143552ea20d560b06ec303f5 | [
"MIT"
] | 48 | 2019-12-05T05:30:51.000Z | 2022-03-22T02:03:00.000Z | import os.path
import cv2
import numpy as np
import requests
import torchvision
import torchvision.transforms as transforms
print("Faster R-CNN object detection")
# COCO dataset class names
classes = [
'background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'street sign',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack',
'umbrella', 'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',
'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'hair brush']
# Download object detection image
image_file = 'source_2.png'
if not os.path.isfile(image_file):
url = "https://github.com/ivan-vasilev/advanced-deep-learning-with-python/blob/master/chapter04-detection-segmentation/source_2.png"
r = requests.get(url)
with open(image_file, 'wb') as f:
f.write(r.content)
# load the pytorch model
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# set the model in evaluation mode
model.eval()
# read the image file
img = cv2.imread(image_file)
# transform the input to tensor
transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
nn_input = transform(img)
output = model([nn_input])
# random color for each class
colors = np.random.uniform(0, 255, size=(len(classes), 3))
# iterate over the network output for all boxes
for box, box_class, score in zip(output[0]['boxes'].detach().numpy(),
output[0]['labels'].detach().numpy(),
output[0]['scores'].detach().numpy()):
# filter the boxes by score
if score > 0.5:
# transform bounding box format
box = [(box[0], box[1]), (box[2], box[3])]
# select class color
color = colors[box_class]
# extract class name
class_name = classes[box_class]
# draw the bounding box
cv2.rectangle(img=img,
pt1=box[0],
pt2=box[1],
color=color,
thickness=2)
# display the box class label
cv2.putText(img=img,
text=class_name,
org=box[0],
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=color,
thickness=2)
cv2.imshow("Object detection", img)
cv2.waitKey()
| 36.267442 | 136 | 0.600192 |
ace3d2f1c4d592d4c8d90771645acdf7f1d92a5c | 49,751 | py | Python | clib/fakeoftable.py | boldsort/faucet | 451fbaa8ebce1822e06615c9da947f1dc7e3e416 | [
"Apache-2.0"
] | 3 | 2021-04-07T19:10:12.000Z | 2021-12-30T17:11:14.000Z | clib/fakeoftable.py | boldsort/faucet | 451fbaa8ebce1822e06615c9da947f1dc7e3e416 | [
"Apache-2.0"
] | null | null | null | clib/fakeoftable.py | boldsort/faucet | 451fbaa8ebce1822e06615c9da947f1dc7e3e416 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import json
import ast
import heapq
import pprint
from collections import OrderedDict
from bitstring import Bits
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
from ryu.ofproto import ofproto_parser as ofp_parser
from ryu.lib import addrconv
CONTROLLER_PORT = 4294967293
IN_PORT = 4294967288
class FakeOFTableException(Exception):
    """Raised when an erroneous flow or group mod is applied to the fake pipeline."""
class DFS:
    """Tracks the state of a best-first search through the FakeOFNetwork.

    Maintains the set of (dp_id, packet) pairs already explored and a
    priority-ordered frontier of pairs still to explore (smaller priority
    values are explored first).
    """

    visited = None
    heap = None

    def __init__(self):
        self.visited = {}
        self.heap = []

    def visit(self, dp_id, pkt):
        """Record that pkt has been seen at node dp_id.

        Args:
            dp_id: The DP ID for the node that is being visited
            pkt: The packet that is visiting the node
        """
        seen = self.visited.setdefault(dp_id, [])
        if pkt not in seen:
            seen.append(pkt)

    def has_visited(self, dp_id, pkt):
        """Return True iff pkt has previously visited node dp_id.

        Args:
            dp_id: The DP ID for the node is being visited
            pkt: The packet that is visiting the node
        """
        return pkt in self.visited.get(dp_id, [])

    def peek(self):
        """Return the highest-priority (smallest value) frontier item without removing it.

        Returns:
            (dp_id, pkt) pair, or (None, None) if the frontier is empty
        """
        if not self.heap:
            return None, None
        _, (dp_id, pkt) = self.heap[0]
        return dp_id, pkt

    def push(self, dp_id, pkt, priority):
        """Queue the (dp_id, pkt) pair on the frontier with the given priority.

        The packet dict is flattened to a tuple of items so it can live in
        the heap (dicts are unhashable/unorderable).
        """
        heapq.heappush(self.heap, (priority, (dp_id, tuple(pkt.items()))))

    def pop(self):
        """Remove and return the highest-priority frontier item.

        Returns:
            (dp_id, pkt) pair, or (None, None) if the frontier is empty
        """
        if not self.heap:
            return None, None
        _, (dp_id, pkt) = heapq.heappop(self.heap)
        return dp_id, pkt
class FakeOFNetwork:
    """
    FakeOFNetwork is a virtual openflow pipeline used for testing openflow controllers
    The network contains multiple FakeOFTables to represent multiple switches in a network
    """

    def __init__(self, valves_manager, num_tables, requires_tfm=True):
        """
        Args:
            valves_manager (ValvesManager): Valves manager class to resolve stack traversals
            num_tables (int): The number of tables to configure in each FakeOFTable
            requires_tfm (bool): Whether TFMs are required
        """
        self.valves_manager = valves_manager
        # One FakeOFTable pipeline per configured valve, keyed by DP ID
        self.tables = {}
        for dp_id in self.valves_manager.valves:
            self.tables[dp_id] = FakeOFTable(dp_id, num_tables, requires_tfm)

    def apply_ofmsgs(self, dp_id, ofmsgs, ignore_errors=False):
        """Applies ofmsgs to a FakeOFTable for DP ID"""
        self.tables[dp_id].apply_ofmsgs(ofmsgs, ignore_errors=ignore_errors)

    def print_table(self, dp_id):
        """Prints the table in string format to STDERR"""
        sys.stderr.write('TABLE %x' % dp_id)
        sys.stderr.write(str(self.tables[dp_id]) + '\n')
        sys.stderr.write('======================\n\n')

    def shortest_path_len(self, src_dpid, dst_dpid):
        """Returns the length (in nodes, not hops) of the shortest path from the
        source to the destination; used as the search priority in is_output"""
        src_valve = self.valves_manager.valves[src_dpid]
        dst_valve = self.valves_manager.valves[dst_dpid]
        if src_valve == dst_valve:
            return 1
        elif src_valve.dp.stack and dst_valve.dp.stack:
            return len(src_valve.dp.stack.shortest_path(dst_valve.dp.name))
        else:
            # Not stacked (or only one side stacked): assume directly adjacent
            return 2

    def is_output(self, match, src_dpid, dst_dpid, port=None, vid=None, trace=False):
        """
        Traverses a packet through the network until we have searched everything
        or successfully output a packet to the destination with expected port and vid

        If port is None return True if output to any port (including special ports)
        regardless of VLAN tag.
        If vid is None return True if output to specified port regardless of VLAN tag.
        If vid OFPVID_PRESENT bit is 0, return True if output packet does not have
        a VLAN tag OR packet OFPVID_PRESENT is 0

        Args:
            match (dict): A dictionary keyed by header field names with values
            src_dpid: The source DP ID of the match packet entering the Fake OF network
            dst_dpid: The expected destination DP ID of the packet match
            port: The expected output port on the destination DP
            vid: The expected output vid on the destination DP
            trace (bool): Print the trace of traversing the tables
        Returns:
            true if packets with match fields is output to port with correct VLAN
        """
        found = False
        # Best-first search: frontier ordered by distance to the destination DP,
        # so paths heading towards dst_dpid are explored first
        dfs = DFS()
        priority = self.shortest_path_len(src_dpid, dst_dpid)
        pkt = match.copy()
        dfs.push(src_dpid, pkt, priority)
        dfs.visit(src_dpid, pkt)
        while not found:
            # Search through the packet paths until we have searched everything or
            # successfully output the packet to the destination in the expected format
            dp_id, pkt = dfs.pop()
            if dp_id is None or pkt is None:
                break
            # DFS stores packets as item tuples; rebuild the dict form
            pkt = dict(pkt)
            if dp_id == dst_dpid:
                # A packet has reached the destination, so test for the output
                found = self.tables[dp_id].is_full_output(pkt, port, vid, trace=trace)
                if not found and trace:
                    # A packet on the destination DP is not output in the expected state so
                    # continue searching (flood reflection)
                    sys.stderr.write('Output is away from destination\n')
            if not found:
                # Packet not reached destination, so continue traversing
                if trace:
                    sys.stderr.write('FakeOFTable %s: %s\n' % (dp_id, pkt))
                port_outputs = self.tables[dp_id].get_port_outputs(pkt, trace=trace)
                valve = self.valves_manager.valves[dp_id]
                for out_port, out_pkts in port_outputs.items():
                    if out_port == IN_PORT:
                        # Rebind output to the packet in_port value
                        out_port = pkt['in_port']
                    if out_port not in valve.dp.ports:
                        # Ignore output to improper ports & controller
                        # TODO: Here we should actually send the packet to the
                        #   controller, and maybe install necessary rules to
                        #   help testing routing implementations
                        continue
                    for out_pkt in out_pkts:
                        port_obj = valve.dp.ports[out_port]
                        if port_obj.stack:
                            # Need to continue traversing through the FakeOFNetwork:
                            # the packet re-enters the adjacent DP on the stack peer port
                            adj_port = port_obj.stack['port']
                            adj_dpid = port_obj.stack['dp'].dp_id
                            new_pkt = out_pkt.copy()
                            new_pkt['in_port'] = adj_port.number
                            if not dfs.has_visited(adj_dpid, new_pkt):
                                # Add packet to the heap if we have not visited the node with
                                # this packet before (prevents traversal loops)
                                priority = self.shortest_path_len(adj_dpid, dst_dpid)
                                dfs.push(adj_dpid, new_pkt, priority)
                                dfs.visit(adj_dpid, new_pkt)
                        elif trace:
                            # Output to non-stack port, can ignore this output
                            sys.stderr.write(
                                'Ignoring non-stack output %s:%s\n' % (valve.dp.name, out_port))
            if trace:
                sys.stderr.write('\n')
        return found

    def table_state(self, dp_id):
        """Return tuple of table hash & table str"""
        return self.tables[dp_id].table_state()

    def hash_table(self, dp_id):
        """Return a hash of a single FakeOFTable"""
        return self.tables[dp_id].__hash__()
class FakeOFTable:
"""Fake OFTable is a virtual openflow pipeline used for testing openflow
controllers.
The tables are populated using apply_ofmsgs and can be queried with
is_output.
"""
def __init__(self, dp_id, num_tables=1, requires_tfm=True):
self.dp_id = dp_id
self.tables = [[] for _ in range(0, num_tables)]
self.groups = {}
self.requires_tfm = requires_tfm
self.tfm = {}
def table_state(self):
"""Return tuple of table hash & table str"""
table_str = str(self.tables)
return (hash(frozenset(table_str)), table_str)
def __hash__(self):
"""Return a host of the tables"""
return hash(frozenset(str(self.tables)))
def _apply_groupmod(self, ofmsg):
"""Maintain group table."""
def _del(_ofmsg, group_id):
if group_id == ofp.OFPG_ALL:
self.groups = {}
return
if group_id in self.groups:
del self.groups[group_id]
def _add(ofmsg, group_id):
if group_id in self.groups:
raise FakeOFTableException(
'group already in group table: %s' % ofmsg)
self.groups[group_id] = ofmsg
def _modify(ofmsg, group_id):
if group_id not in self.groups:
raise FakeOFTableException(
'group not in group table: %s' % ofmsg)
self.groups[group_id] = ofmsg
_groupmod_handlers = {
ofp.OFPGC_DELETE: _del,
ofp.OFPGC_ADD: _add,
ofp.OFPGC_MODIFY: _modify,
}
_groupmod_handlers[ofmsg.command](ofmsg, ofmsg.group_id)
    def _apply_flowmod(self, ofmsg):
        """Adds, Deletes and modify flow modification messages are applied
        according to section 6.4 of the OpenFlow 1.3 specification.

        Raises FakeOFTableException if a flowmod arrives before the TFM that
        defines its table (when TFMs are required), or if applying it would
        exceed the table's advertised max_entries.
        """

        def _validate_flowmod_tfm(table_id, tfm_body, ofmsg):
            # Flowmods are only legal once a TFM has described their table
            if not self.requires_tfm:
                return

            if table_id == ofp.OFPTT_ALL:
                # A wildcard-table flowmod with match fields still needs a TFM
                if ofmsg.match.items() and not self.tfm:
                    raise FakeOFTableException(
                        'got %s with matches before TFM that defines tables'
                        % ofmsg)
                return

            if tfm_body is None:
                raise FakeOFTableException(
                    'got %s before TFM that defines table %u' % (
                        ofmsg, table_id
                    )
                )

        def _add(table, flowmod):
            # Add a flow entry, replacing any strict-identical existing entry.
            # From the 1.3 spec, section 6.4:
            # For add requests (OFPFC_ADD) with the
            # OFPFF_CHECK_OVERLAP flag set, the switch must first
            # check for any overlapping flow entries in the
            # requested table. Two flow entries overlap if a
            # single packet may match both, and both flow entries
            # have the same priority, but the two flow entries
            # don't have the exact same match. If an overlap
            # conflict exists between an existing flow entry and
            # the add request, the switch must refuse the addition
            # and respond with an ofp_error_msg with
            # OFPET_FLOW_MOD_FAILED type and OFPFMFC_OVERLAP code.
            #
            # Without the check overlap flag it seems like it is
            # possible that we can have overlapping flow table
            # entries which will cause ambiguous behaviour. This is
            # obviously unnacceptable so we will assume this is
            # always set
            for fte in table:
                if flowmod.fte_matches(fte, strict=True):
                    table.remove(fte)
                    break
                if flowmod.overlaps(fte):
                    raise FakeOFTableException(
                        'Overlapping flowmods {} and {}'.format(
                            flowmod, fte))
            table.append(flowmod)

        def _del(table, flowmod):
            # Non-strict delete: remove every entry the flowmod matches
            removals = [fte for fte in table if flowmod.fte_matches(fte)]
            for fte in removals:
                table.remove(fte)

        def _del_strict(table, flowmod):
            # Strict delete: remove at most the one exactly-matching entry
            for fte in table:
                if flowmod.fte_matches(fte, strict=True):
                    table.remove(fte)
                    break

        def _modify(table, flowmod):
            # Non-strict modify: rewrite instructions of every matching entry
            for fte in table:
                if flowmod.fte_matches(fte):
                    fte.instructions = flowmod.instructions

        def _modify_strict(table, flowmod):
            # Strict modify: rewrite instructions of the one exactly-matching entry
            for fte in table:
                if flowmod.fte_matches(fte, strict=True):
                    fte.instructions = flowmod.instructions
                    break

        # Dispatch table; an unknown command raises KeyError
        _flowmod_handlers = {
            ofp.OFPFC_ADD: _add,
            ofp.OFPFC_DELETE: _del,
            ofp.OFPFC_DELETE_STRICT: _del_strict,
            ofp.OFPFC_MODIFY: _modify,
            ofp.OFPFC_MODIFY_STRICT: _modify_strict,
        }

        table_id = ofmsg.table_id
        tfm_body = self.tfm.get(table_id, None)

        # OFPTT_ALL (or no table ID) applies the flowmod to every table
        if table_id == ofp.OFPTT_ALL or table_id is None:
            tables = self.tables
        else:
            tables = [self.tables[table_id]]

        _validate_flowmod_tfm(table_id, tfm_body, ofmsg)
        flowmod = FlowMod(ofmsg)

        for table in tables:
            _flowmod_handlers[ofmsg.command](table, flowmod)

        # Enforce the table capacity advertised in the TFM, if known
        if tfm_body:
            for table in tables:
                entries = len(table)
                if entries > tfm_body.max_entries:
                    tfm_table_details = '%s : table %u %s full (%u/%u)' % (
                        self.dp_id, table_id, tfm_body.name, entries, tfm_body.max_entries)
                    flow_dump = '\n\n'.join(
                        (tfm_table_details, str(ofmsg), str(tfm_body)))
                    raise FakeOFTableException(flow_dump)
def _apply_tfm(self, ofmsg):
self.tfm = {body.table_id: body for body in ofmsg.body}
def _apply_flowstats(self, ofmsg):
"""Update state of flow tables to match an OFPFlowStatsReply message.
This assumes a tfm is not required."""
self.tables = []
self.requires_tfm = False
self.tfm = {}
for stat in ofmsg.body:
while len(self.tables) <= stat.table_id:
self.tables.append([])
self.tables[stat.table_id].append(FlowMod(stat))
def apply_ofmsgs(self, ofmsgs, ignore_errors=False):
"""Update state of test flow tables."""
for ofmsg in ofmsgs:
try:
if isinstance(ofmsg, parser.OFPBarrierRequest):
continue
if isinstance(ofmsg, parser.OFPPacketOut):
continue
if isinstance(ofmsg, parser.OFPSetConfig):
continue
if isinstance(ofmsg, parser.OFPSetAsync):
continue
if isinstance(ofmsg, parser.OFPDescStatsRequest):
continue
if isinstance(ofmsg, parser.OFPMeterMod):
# TODO: handle OFPMeterMod
continue
if isinstance(ofmsg, parser.OFPTableFeaturesStatsRequest):
self._apply_tfm(ofmsg)
continue
if isinstance(ofmsg, parser.OFPGroupMod):
self._apply_groupmod(ofmsg)
continue
if isinstance(ofmsg, parser.OFPFlowMod):
self._apply_flowmod(ofmsg)
self.sort_tables()
continue
if isinstance(ofmsg, parser.OFPFlowStatsReply):
self._apply_flowstats(ofmsg)
self.sort_tables()
continue
except FakeOFTableException:
if not ignore_errors:
raise
if not ignore_errors:
raise FakeOFTableException('Unsupported flow %s' % str(ofmsg))
def single_table_lookup(self, match, table_id, trace=False):
"""
Searches through a single table with `table_id` for entries
that will be applied to the packet with fields represented by match
Args:
match (dict): A dictionary keyed by header field names with values
table_id (int): The table ID to send the match packet through
trace (bool): Print the trace of traversing the table
Returns:
matching_fte: First matching flowmod in the table
"""
packet_dict = match.copy()
table = self.tables[table_id]
matching_fte = None
# Find matching flowmods
for fte in table:
if fte.pkt_matches(packet_dict):
matching_fte = fte
break
if trace:
sys.stderr.write('%s: %s\n' % (table_id, matching_fte))
return matching_fte
    def _process_instruction(self, match, instruction):
        """
        Process an instructions actions into an output dictionary

        Args:
            match (dict): A dictionary keyed by header field names with values
            instruction: The instruction being applied to the packet match
        Returns:
            outputs: OrderedDict of an output port to list of output packets
            packet_dict: final dictionary of the packet
            pending_actions: packet-modifying actions applied after the last
                output (non-empty indicates a likely pipeline bug; the caller
                decides whether that is an error)
        """
        outputs = OrderedDict()
        packet_dict = match.copy()
        pending_actions = []
        for action in instruction.actions:
            if action.type == ofp.OFPAT_OUTPUT:
                # Save the packet that is output to a port
                outputs.setdefault(action.port, [])
                outputs[action.port].append(packet_dict.copy())
                # Output consumes any prior modifications
                pending_actions = []
                continue
            pending_actions.append(action)

            if action.type == ofp.OFPAT_SET_FIELD:
                # Set field, modify a packet header
                packet_dict[action.key] = action.value
            elif action.type == ofp.OFPAT_PUSH_VLAN:
                if 'vlan_vid' in packet_dict and packet_dict['vlan_vid'] & ofp.OFPVID_PRESENT:
                    # Pushing on another tag, so create another
                    # field for the encapsulated VID
                    packet_dict['encap_vid'] = packet_dict['vlan_vid']
                # Push the VLAN header to the packet
                packet_dict['vlan_vid'] = ofp.OFPVID_PRESENT
            elif action.type == ofp.OFPAT_POP_VLAN:
                # Remove VLAN header from the packet
                # (raises KeyError if the packet carries no VLAN tag)
                packet_dict.pop('vlan_vid')
                if 'vlan_pcp' in packet_dict:
                    # Also make sure to pop off any VLAN header information too
                    packet_dict.pop('vlan_pcp')
                if 'encap_vid' in packet_dict:
                    # Move the encapsulated VID to the front
                    packet_dict['vlan_vid'] = packet_dict['encap_vid']
                    packet_dict.pop('encap_vid')
                else:
                    packet_dict['vlan_vid'] = 0
            elif action.type == ofp.OFPAT_GROUP:
                # Group mod so make sure that we process the group buckets
                if action.group_id not in self.groups:
                    raise FakeOFTableException('output group not in group table: %s' % action)
                buckets = self.groups[action.group_id].buckets
                for bucket in buckets:
                    # Each bucket acts on its own copy of the packet state;
                    # bucket modifications are not propagated back
                    bucket_outputs, _, _ = self._process_instruction(packet_dict, bucket)
                    for out_port, out_pkts in bucket_outputs.items():
                        outputs.setdefault(out_port, [])
                        outputs[out_port].extend(out_pkts)
                # A group action counts as an output for pending purposes
                pending_actions = []
        return outputs, packet_dict, pending_actions
    def get_table_output(self, match, table_id, trace=False):
        """
        Send a packet through a single table and return the output
        ports mapped to the output packet

        Args:
            match (dict): A dictionary keyed by header field names with values
            table_id (int): The table ID to send the packet match through
            trace (bool): Print the trace of traversing the table
        Returns:
            outputs: OrderedDict of an output port to output packet map
            packet_dict: The last version of the packet
            next_table: Table ID of the next table, or None if the pipeline ends here

        Raises:
            FakeOFTableException: on a goto to a lower/equal table ID, an
                empty apply-actions list, or packet-modifying actions left
                dangling after the last output with no goto.
        """
        next_table = None
        packet_dict = match.copy()
        outputs = OrderedDict()
        matching_fte = self.single_table_lookup(match, table_id, trace)
        pending_actions = []
        if matching_fte:
            for instruction in matching_fte.instructions:
                if instruction.type == ofp.OFPIT_GOTO_TABLE:
                    # Gotos must move strictly forward in the pipeline
                    if table_id < instruction.table_id:
                        next_table = instruction.table_id
                    else:
                        raise FakeOFTableException('goto to lower table ID')
                elif instruction.type == ofp.OFPIT_APPLY_ACTIONS:
                    if not instruction.actions:
                        raise FakeOFTableException('no-op instruction actions')
                    instruction_outputs, packet_dict, pending_actions = self._process_instruction(
                        packet_dict, instruction)
                    for out_port, out_pkts in instruction_outputs.items():
                        outputs.setdefault(out_port, [])
                        outputs[out_port].extend(out_pkts)
                elif instruction.type == ofp.OFPIT_WRITE_METADATA:
                    # metadata' = (metadata & ~mask) | (value & mask)
                    metadata = packet_dict.get('metadata', 0)
                    mask = instruction.metadata_mask
                    mask_compl = mask ^ 0xFFFFFFFFFFFFFFFF
                    packet_dict['metadata'] = (metadata & mask_compl) | (instruction.metadata & mask)
        if next_table:
            # A goto means a later table may still output the modified packet
            pending_actions = []
        if pending_actions:
            raise FakeOFTableException('flow performs actions on packet after output with no goto: %s' % matching_fte)
        return outputs, packet_dict, next_table
def get_output(self, match, trace=False):
"""
Get all of the outputs of the tables with the output packets
for each table in the FakeOFTable that match progresses through
Args:
match (dict): A dictionary keyed by header field names with values
trace (bool): Print the trace of traversing the table
Returns:
table_outputs: map from table_id output to output ports & packets
for that table
"""
table_outputs = {}
table_id = 0
next_table = True
packet_dict = match.copy()
while next_table:
next_table = False
outputs, packet_dict, next_table_id = self.get_table_output(
packet_dict, table_id, trace)
table_outputs[table_id] = outputs
next_table = next_table_id is not None
table_id = next_table_id
return table_outputs
def get_port_outputs(self, match, trace=False):
"""
Get all of the outputs of the tables with the output packets
for each table in the FakeOFTable that match progresses through
Args:
match (dict): A dictionary keyed by header field names with value
trace (bool): Print the trace of traversing the table
Returns:
table_outputs: Map from output port number to a list of unique output packets
"""
port_outputs = {}
table_id = 0
next_table = True
packet_dict = match.copy()
while next_table:
next_table = False
outputs, packet_dict, next_table_id = self.get_table_output(
packet_dict, table_id, trace)
for out_port, out_pkts in outputs.items():
port_outputs.setdefault(out_port, [])
# Remove duplicate entries from the list
for out_pkt in out_pkts:
if out_pkt not in port_outputs[out_port]:
port_outputs[out_port].append(out_pkt)
next_table = next_table_id is not None
table_id = next_table_id
return port_outputs
    def is_full_output(self, match, port=None, vid=None, trace=False):
        """
        If port is None return True if output to any port (including special ports)
        regardless of VLAN tag.
        If vid is None return True if output to specified port regardless of VLAN tag.
        If vid OFPVID_PRESENT bit is 0, return True if output packet does not have
        a VLAN tag OR packet OFPVID_PRESENT is 0
        Args:
            match (dict): A dictionary keyed by header field names with values
            port: The expected output port
            vid: The expected output vid
            trace (bool): Print the trace of traversing the tables
        Returns:
            true if packets with match fields is output to port with correct VLAN
        """
        table_outputs = self.get_output(match, trace)
        if trace:
            sys.stderr.write(pprint.pformat(table_outputs) + '\n')
        in_port = match.get('in_port')
        # NOTE(review): the loop variable shadows the table_outputs dict;
        # harmless because the dict is not referenced after this point.
        for table_outputs in table_outputs.values():
            for out_port, out_pkts in table_outputs.items():
                for out_pkt in out_pkts:
                    # Skip a plain output back to the ingress port; only the
                    # special OFPP_IN_PORT port may hairpin (handled below).
                    if port == out_port and port == out_pkt['in_port']:
                        continue
                    if port is None:
                        # Port is None & outputting so return true
                        return True
                    if vid is None:
                        # Vid is None, return true if output to specified port
                        if port == out_port:
                            return True
                        if out_port == ofp.OFPP_IN_PORT and port == in_port:
                            # In some cases we want to match to specifically ofp.OFPP_IN_PORT
                            # otherwise we treat ofp.OFPP_IN_PORT as the match in_port
                            return True
                    if port == out_port or (out_port == ofp.OFPP_IN_PORT and port == in_port):
                        # Matching port, so check matching VID
                        if vid & ofp.OFPVID_PRESENT == 0:
                            # If OFPVID_PRESENT bit is 0 then packet should not have a VLAN tag
                            return ('vlan_vid' not in out_pkt or
                                    out_pkt['vlan_vid'] & ofp.OFPVID_PRESENT == 0)
                        # VID specified, check if matching expected
                        return 'vlan_vid' in out_pkt and vid == out_pkt['vlan_vid']
        return False
    def lookup(self, match, trace=False):
        """Return the entries from flowmods that matches match.

        Searches each table in the pipeline for the entries that will be
        applied to the packet with fields represented by match.

        Arguments:
            match: a dictionary keyed by header field names with values.
                header fields not provided in match must be wildcarded for the
                entry to be considered matching.

        Returns: tuple of (list of the instructions that will be applied to
            the packet represented by match, final packet header dict)
        """
        packet_dict = match.copy()  # Packet headers may be modified
        instructions = []
        table_id = 0
        goto_table = True
        while goto_table:
            goto_table = False
            table = self.tables[table_id]
            matching_fte = None
            # find a matching flowmod (tables are kept in priority order)
            for fte in table:
                if fte.pkt_matches(packet_dict):
                    matching_fte = fte
                    break
            # if a flowmod is found, make modifications to the match values and
            # determine if another lookup is necessary
            if trace:
                sys.stderr.write('%d: %s\n' % (table_id, matching_fte))
            if matching_fte:
                for instruction in matching_fte.instructions:
                    instructions.append(instruction)
                    if instruction.type == ofp.OFPIT_GOTO_TABLE:
                        # only forward gotos continue the pipeline walk
                        if table_id < instruction.table_id:
                            table_id = instruction.table_id
                            goto_table = True
                    elif instruction.type == ofp.OFPIT_APPLY_ACTIONS:
                        for action in instruction.actions:
                            # set-field rewrites headers seen by later tables
                            if action.type == ofp.OFPAT_SET_FIELD:
                                packet_dict[action.key] = action.value
                    elif instruction.type == ofp.OFPIT_WRITE_METADATA:
                        metadata = packet_dict.get('metadata', 0)
                        mask = instruction.metadata_mask
                        mask_compl = mask ^ 0xFFFFFFFFFFFFFFFF
                        packet_dict['metadata'] = (metadata & mask_compl)\
                            | (instruction.metadata & mask)
        return (instructions, packet_dict)
def flow_count(self):
"""Return number of flow tables rules"""
return sum(map(len, self.tables))
    def is_output(self, match, port=None, vid=None, trace=False):
        """Return true if packets with match fields is output to port with
        correct vlan.

        If port is none it will return true if output to any port (including
        special ports) regardless of vlan tag.

        If vid is none it will return true if output to specified port
        regardless of vlan tag.

        To specify checking that the packet should not have a vlan tag, set the
        OFPVID_PRESENT bit in vid to 0.

        Arguments:
            Match: a dictionary keyed by header field names with values.
        Raises:
            FakeOFTableException: if this legacy implementation disagrees with
                is_full_output(), or an output group is unknown.
        """
        # Cross-check result: both implementations must agree below.
        full_output = self.is_full_output(match.copy(), port, vid, trace)

        def _output_result(action, vid_stack, port, vid):
            # Return True/False once a verdict can be made for this output
            # action, or None to continue scanning further actions.
            if port is None:
                return True
            in_port = match.get('in_port')
            result = None
            if action.port == port:
                if port == in_port:
                    # plain output to the ingress port is ignored
                    result = None
                elif vid is None:
                    result = True
                elif vid & ofp.OFPVID_PRESENT == 0:
                    # expecting an untagged packet: stack must be empty
                    result = not vid_stack
                else:
                    result = bool(vid_stack and vid == vid_stack[-1])
            elif action.port == ofp.OFPP_IN_PORT and port == in_port:
                result = True
            return result

        def _process_vid_stack(action, vid_stack):
            # Track VLAN push/pop/set-field actions. NOTE: mutates vid_stack
            # in place (the return value aliases the argument).
            if action.type == ofp.OFPAT_PUSH_VLAN:
                vid_stack.append(ofp.OFPVID_PRESENT)
            elif action.type == ofp.OFPAT_POP_VLAN:
                vid_stack.pop()
            elif action.type == ofp.OFPAT_SET_FIELD:
                if action.key == 'vlan_vid':
                    vid_stack[-1] = action.value
            return vid_stack

        if trace:
            sys.stderr.write(
                'tracing packet flow %s matching to port %s, vid %s\n' % (match, port, vid))
        # vid_stack represents the packet's vlan stack, innermost label listed
        # first
        match_vid = match.get('vlan_vid', 0)
        vid_stack = []
        if match_vid & ofp.OFPVID_PRESENT != 0:
            vid_stack.append(match_vid)
        instructions, _ = self.lookup(match, trace=trace)
        for instruction in instructions:
            if instruction.type != ofp.OFPIT_APPLY_ACTIONS:
                continue
            for action in instruction.actions:
                vid_stack = _process_vid_stack(action, vid_stack)
                if action.type == ofp.OFPAT_OUTPUT:
                    output_result = _output_result(
                        action, vid_stack, port, vid)
                    if output_result is not None:
                        if output_result != full_output:
                            raise FakeOFTableException('Output functions do not match')
                        return output_result
                elif action.type == ofp.OFPAT_GROUP:
                    if action.group_id not in self.groups:
                        raise FakeOFTableException(
                            'output group not in group table: %s' % action)
                    buckets = self.groups[action.group_id].buckets
                    for bucket in buckets:
                        # NOTE(review): this is an alias, not a copy, so the
                        # bucket's VLAN actions mutate the shared stack; the
                        # _output_result call below also passes vid_stack
                        # instead of bucket_vid_stack -- verify intended.
                        bucket_vid_stack = vid_stack
                        for bucket_action in bucket.actions:
                            bucket_vid_stack = _process_vid_stack(
                                bucket_action, bucket_vid_stack)
                            if bucket_action.type == ofp.OFPAT_OUTPUT:
                                output_result = _output_result(
                                    bucket_action, vid_stack, port, vid)
                                if output_result is not None:
                                    if output_result != full_output:
                                        raise FakeOFTableException('Output functions do not match')
                                    return output_result
        if full_output != False:
            raise FakeOFTableException('Output functions do not match')
        return False
def apply_instructions_to_packet(self, match):
"""
Send packet through the fake OF table pipeline
Args:
match (dict): A dict keyed by header fields with values, represents
a packet
Returns:
dict: Modified match dict, represents packet that has been through
the pipeline with values possibly altered
"""
_, packet_dict = self.lookup(match)
return packet_dict
def __str__(self):
string = ''
for table_id, table in enumerate(self.tables):
string += '\n----- Table %u -----\n' % (table_id)
string += '\n'.join(sorted([str(flowmod) for flowmod in table]))
return string
def sort_tables(self):
"""Sort flows in tables by priority order."""
self.tables = [sorted(table, reverse=True) for table in self.tables]
class FlowMod:
    """Represents a flow modification message and its corresponding entry in
    the flow table.

    Match fields are normalised to (value, mask) pairs of Bits objects so
    masked matching works uniformly across MAC/IPv4/IPv6/integer fields.
    """

    # Fields holding 48-bit MAC addresses.
    MAC_MATCH_FIELDS = (
        'eth_src', 'eth_dst', 'arp_sha', 'arp_tha', 'ipv6_nd_sll',
        'ipv6_nd_tll'
    )
    # Fields holding 32-bit IPv4 addresses.
    IPV4_MATCH_FIELDS = ('ipv4_src', 'ipv4_dst', 'arp_spa', 'arp_tpa')
    # Fields holding 128-bit IPv6 addresses.
    IPV6_MATCH_FIELDS = ('ipv6_src', 'ipv6_dst', 'ipv6_nd_target')
    # Fields rendered in hexadecimal by __str__.
    # Fixed: was ('eth_type') -- a plain string, not a tuple -- so
    # `key in HEX_FIELDS` performed substring matching instead of membership.
    HEX_FIELDS = ('eth_type',)

    def __init__(self, flowmod):
        """flowmod is a ryu flow modification message object
        (OFPFlowMod or an OFPStats entry)."""
        self.priority = flowmod.priority
        self.cookie = flowmod.cookie
        self.instructions = flowmod.instructions
        self.validate_instructions()
        self.match_values = {}
        self.match_masks = {}
        self.out_port = None
        # flowmod can be an OFPFlowMod or an OFPStats;
        # out_port is only meaningful as a delete filter
        if isinstance(flowmod, parser.OFPFlowMod):
            if flowmod.command in (ofp.OFPFC_DELETE, ofp.OFPFC_DELETE_STRICT)\
                    and flowmod.out_port != ofp.OFPP_ANY:
                self.out_port = flowmod.out_port
        for key, val in flowmod.match.items():
            if isinstance(val, tuple):
                val, mask = val
            else:
                mask = -1  # -1 -> all bits set (exact match)
            mask = self.match_to_bits(key, mask)
            val = self.match_to_bits(key, val) & mask
            self.match_values[key] = val
            self.match_masks[key] = mask

    def validate_instructions(self):
        """Raise if any instruction type appears more than once (OF 1.3)."""
        instruction_types = set()
        for instruction in self.instructions:
            if instruction.type in instruction_types:
                raise FakeOFTableException(
                    'FlowMod with Multiple instructions of the '
                    'same type: {}'.format(self.instructions))
            instruction_types.add(instruction.type)

    def out_port_matches(self, other):
        """returns True if other has an output action to this flowmods
        output_port"""
        if self.out_port is None or self.out_port == ofp.OFPP_ANY:
            return True
        for instruction in other.instructions:
            if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
                for action in instruction.actions:
                    if action.type == ofp.OFPAT_OUTPUT:
                        if action.port == self.out_port:
                            return True
        return False

    def pkt_matches(self, pkt_dict):
        """returns True if pkt_dict matches this flow table entry.

        args:
            pkt_dict - a dictionary keyed by flow table match fields with
                values
        if an element is included in the flow table entry match fields but not
        in the pkt_dict that is assumed to indicate a failed match
        """
        # TODO: add cookie and out_group
        for key, val in self.match_values.items():
            if key not in pkt_dict:
                return False
            val_bits = self.match_to_bits(key, pkt_dict[key])
            if val_bits != (val & self.match_masks[key]):
                return False
        return True

    def _matches_match(self, other):
        # strict equality of priority + masked match values
        return (self.priority == other.priority and
                self.match_values == other.match_values and
                self.match_masks == other.match_masks)

    def fte_matches(self, other, strict=False):
        """returns True if the flow table entry other matches this flowmod.

        used for finding existing flow table entries that match with this
        flowmod.

        args:
            other - a flowmod object
            strict (bool) - whether to use strict matching (as defined in
                of1.3 specification section 6.4)
        """
        if not self.out_port_matches(other):
            return False
        if strict:
            return self._matches_match(other)
        for key, val in self.match_values.items():
            if key not in other.match_values:
                return False
            if other.match_values[key] & self.match_masks[key] != val:
                return False
        return True

    def overlaps(self, other):
        """ returns True if any packet can match both self and other."""
        # This is different from the matches method as matches assumes an
        # undefined field is a failed match. In this case an undefined field is
        # potentially an overlap and therefore is considered success
        if other.priority != self.priority:
            return False
        for key, val in self.match_values.items():
            if key in other.match_values:
                if val & other.match_masks[key] != other.match_values[key]:
                    return False
                if other.match_values[key] & self.match_masks[key] != val:
                    return False
        return True

    def match_to_bits(self, key, val):
        """convert match fields and masks to bits objects.

        this allows for masked matching. Converting all match fields to the
        same object simplifies things (eg __str__).
        """
        if isinstance(val, Bits):
            return val

        def _val_to_bits(conv, val, length):
            if val == -1:
                # -1 means "all bits set", i.e. an exact-match mask
                return Bits(int=-1, length=length)
            return Bits(bytes=conv(val), length=length)

        if key in self.MAC_MATCH_FIELDS:
            return _val_to_bits(addrconv.mac.text_to_bin, val, 48)
        if key in self.IPV4_MATCH_FIELDS:
            return _val_to_bits(addrconv.ipv4.text_to_bin, val, 32)
        if key in self.IPV6_MATCH_FIELDS:
            return _val_to_bits(addrconv.ipv6.text_to_bin, val, 128)
        return Bits(int=int(val), length=64)

    def bits_to_str(self, key, val):
        """Render a Bits match value as text appropriate for its field."""
        if key in self.MAC_MATCH_FIELDS:
            result = addrconv.mac.bin_to_text(val.tobytes())
        elif key in self.IPV4_MATCH_FIELDS:
            result = addrconv.ipv4.bin_to_text(val.tobytes())
        elif key in self.IPV6_MATCH_FIELDS:
            result = addrconv.ipv6.bin_to_text(val.tobytes())
        elif key in self.HEX_FIELDS:
            result = str(val.hex.lstrip('0'))
        else:
            result = str(val.int)
        return result

    def __lt__(self, other):
        # sorting by priority lets tables be kept in lookup order
        return self.priority < other.priority

    def __eq__(self, other):
        return (self._matches_match(other) and
                self.out_port == other.out_port and
                self.instructions == other.instructions)

    def _pretty_field_str(self, key, value, mask=None):
        """Render one match field (with optional mask) for __str__."""
        mask_str = ""
        value_int = value
        mask_int = mask
        if isinstance(value, Bits):
            value_int = value.int
        if isinstance(mask, Bits):
            mask_int = mask.int  # pytype: disable=attribute-error
        elif mask is None:
            mask_int = -1
        if key == 'vlan_vid':
            if value_int & ofp.OFPVID_PRESENT == 0:
                result = 'vlan untagged'
            elif key == 'vlan_vid' and mask_int == ofp.OFPVID_PRESENT:
                result = 'vlan tagged'
            else:
                # strip the OFPVID_PRESENT bit for display
                result = str(value_int ^ ofp.OFPVID_PRESENT)
                if mask_int != -1:
                    mask_str = str(mask_int ^ ofp.OFPVID_PRESENT)
        elif isinstance(value, Bits):
            result = self.bits_to_str(key, value)
            if mask is not None and mask_int != -1:
                mask_str = self.bits_to_str(key, mask)
        elif isinstance(value, str):
            result = value
            if mask is not None:
                mask_str = mask
        elif isinstance(value, int):
            if key in self.HEX_FIELDS:
                result = hex(value)
                if mask is not None and mask != -1:
                    mask_str = hex(mask)
            else:
                result = str(value)
                if mask is not None and mask != -1:
                    mask_str = str(mask)
        if mask_str:
            result += "/{}".format(mask_str)
        return result

    def _pretty_action_str(self, action):
        """Render one apply-action for __str__."""
        actions_names_attrs = {
            parser.OFPActionPushVlan.__name__: ('push_vlan', 'ethertype'),
            parser.OFPActionPopVlan.__name__: ('pop_vlan', None),
            parser.OFPActionGroup.__name__: ('group', 'group_id'),
            parser.OFPActionDecNwTtl.__name__: ('dec_nw_ttl', None)}
        value = None
        if isinstance(action, parser.OFPActionOutput):
            name = 'output'
            if action.port == CONTROLLER_PORT:
                value = 'CONTROLLER'
            elif action.port == IN_PORT:
                value = 'IN_PORT'
            else:
                value = str(action.port)
        elif isinstance(action, parser.OFPActionSetField):
            name = 'set_{}'.format(action.key)
            value = self._pretty_field_str(action.key, action.value)
        else:
            name, attr = actions_names_attrs[type(action).__name__]
            if attr:
                value = getattr(action, attr)
        result = name
        if value:
            result += " {}".format(value)
        return result

    def __str__(self):
        result = 'Priority: {0} | Match: '.format(self.priority)
        for key in sorted(self.match_values.keys()):
            val = self.match_values[key]
            mask = self.match_masks[key]
            result += " {} {},".format(
                key, self._pretty_field_str(key, val, mask))
        result = result.rstrip(',')
        result += " | Instructions :"
        if not self.instructions:
            result += ' drop'
        for instruction in self.instructions:
            if isinstance(instruction, parser.OFPInstructionGotoTable):
                result += ' goto {}'.format(instruction.table_id)
            elif isinstance(instruction, parser.OFPInstructionActions):
                for action in instruction.actions:
                    result += " {},".format(self._pretty_action_str(action))
            else:
                result += str(instruction)
        result = result.rstrip(',')
        return result

    def __repr__(self):
        string = 'priority: {0} cookie: {1}'.format(self.priority, self.cookie)
        for key in sorted(self.match_values.keys()):
            mask = self.match_masks[key]
            string += ' {0}: {1}'.format(key, self.match_values[key])
            if mask.int != -1:  # pytype: disable=attribute-error
                string += '/{0}'.format(mask)
        string += ' Instructions: {0}'.format(str(self.instructions))
        return string
class FakeRyuDp:  # pylint: disable=too-few-public-methods
    """Fake ryu Datapath object.

    Just needed to provide a parser to allow us to extract ryu objects from
    JSON (see ofp_msg_from_jsondict calls below).
    """

    def __init__(self):
        """Create fake ryu DP"""
        # the only attribute ryu's JSON decoding reads from a datapath
        self.ofproto_parser = parser
def parse_print_args():
    """Parse arguments for the print command.

    Returns:
        dict: {'filename': path given with -f, or None}
    """
    arg_parser = argparse.ArgumentParser(
        prog='fakeoftable',
        description='Prints a JSON flow table in a human readable format',
        usage="""
    Print a flow table in a human readable format
    {argv0} print -f FILE
    """.format(argv0=sys.argv[0])
    )
    arg_parser.add_argument(
        '-f',
        '--file',
        help='file containing an OFPFlowStatsReply message in JSON format'
    )
    # skip argv[0] (program) and argv[1] (subcommand)
    parsed = arg_parser.parse_args(sys.argv[2:])
    return {'filename': parsed.file}
def parse_probe_args():
    """Parse arguments for the probe command.

    Returns:
        dict: {'packet': packet dict parsed from -p (vlan_vid normalised
               with OFPVID_PRESENT), 'filename': path given with -f}
    """
    arg_parser = argparse.ArgumentParser(
        prog='fakeoftable',
        description='Performs a packet lookup on a JSON openflow table',
        usage="""
    Find the flow table entries in a given flow table that match a given packet
    {argv0} probe -f FILE -p PACKET_STRING
    """.format(argv0=sys.argv[0])
    )
    arg_parser.add_argument(
        '-p',
        '--packet',
        metavar='PACKET_STRING',
        help=(
            '''string representation of a packet dictionary eg. '''
            '''"{'in_port': 1, 'eth_dst': '01:80:c2:00:00:02', 'eth_type': '''
            '''34825}"''')
    )
    arg_parser.add_argument(
        '-f',
        '--file',
        metavar='FILE',
        help='file containing an OFPFlowStatsReply message in JSON format'
    )
    args = arg_parser.parse_args(sys.argv[2:])
    # literal_eval parses the dict safely (no code execution); the previous
    # redundant `packet = args.packet` assignment has been removed.
    packet = ast.literal_eval(args.packet)
    # fix vlan vid: OpenFlow VLAN ids always carry the OFPVID_PRESENT bit
    if 'vlan_vid' in packet:
        packet['vlan_vid'] |= ofp.OFPVID_PRESENT
    return {'packet': packet, 'filename': args.file}
def parse_args():
    """Parse the top-level subcommand and dispatch to its argument parser.

    Returns:
        tuple: (command name, dict of keyword arguments for that command)
    Exits:
        With status -1 (after printing help) on a bad or unknown command.
    """
    arg_parser = argparse.ArgumentParser(
        prog='fakeoftable',
        description='Performs operations on JSON openflow tables',
        usage="""
    {argv0} <command> <args>
    """.format(argv0=sys.argv[0])
    )
    arg_parser.add_argument(
        'command',
        help='Subcommand, either "print" or "probe"'
    )
    args = arg_parser.parse_args(sys.argv[1:2])
    try:
        if args.command == 'probe':
            command_args = parse_probe_args()
        elif args.command == 'print':
            command_args = parse_print_args()
        else:
            # Previously an unknown command fell through with command_args
            # unbound, crashing with an uncaught UnboundLocalError on return.
            raise ValueError('unknown command: %s' % args.command)
    except (KeyError, IndexError, ValueError, AttributeError) as err:
        print(err)
        arg_parser.print_help()
        sys.exit(-1)
    return (args.command, command_args)
def _print(filename):
    """Prints the JSON flow table from a file in a human readable format"""
    with open(filename, 'r') as json_file:
        msg = json.load(json_file)
    datapath = FakeRyuDp()
    ofmsg = ofp_parser.ofp_msg_from_jsondict(datapath, msg)
    table = FakeOFTable(1)
    table.apply_ofmsgs([ofmsg])
    print(table)
def probe(filename, packet):
    """Prints the actions applied to packet by the table from the file"""
    with open(filename, 'r') as json_file:
        msg = json.load(json_file)
    datapath = FakeRyuDp()
    ofmsg = ofp_parser.ofp_msg_from_jsondict(datapath, msg)
    table = FakeOFTable(1)
    table.apply_ofmsgs([ofmsg])
    instructions, out_packet = table.lookup(packet)
    # show the input packet, each instruction applied, then the result
    print(packet)
    for instruction in instructions:
        print(instruction)
    print(out_packet)
def main():
    """Entry point: parse the CLI and dispatch to probe() or _print()."""
    command, kwargs = parse_args()
    if command == 'probe':
        probe(**kwargs)
    elif command == 'print':
        _print(**kwargs)
# Allow the module to be used as a command-line utility.
if __name__ == '__main__':
    main()
| 40.41511 | 118 | 0.571064 |
ace3d39cfd8988d28e30d8353236eda97c3ada1b | 6,012 | py | Python | apps/purchase/forms.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 115 | 2019-08-18T16:12:54.000Z | 2022-03-29T14:17:20.000Z | apps/purchase/forms.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 22 | 2019-09-09T01:34:54.000Z | 2022-03-12T00:33:40.000Z | apps/purchase/forms.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 83 | 2019-08-17T17:09:20.000Z | 2022-03-25T04:46:53.000Z | """Formularios del modulo purchase
"""
# Django Library
from django import forms
from django.forms.formsets import DELETION_FIELD_NAME
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from bootstrap_datepicker_plus import DatePickerInput
from dal import autocomplete
# Localfolder Library
from .models import PyPurchaseOrder, PyPurchaseOrderDetail
class MyDatePickerInput(DatePickerInput):
    """DatePickerInput variant rendered with a project-local template."""

    # NOTE(review): assumes 'datepicker_plus/date-picker.html' exists in the
    # project's template directories -- verify before changing.
    template_name = 'datepicker_plus/date-picker.html'
# ========================================================================== #
class PyPurchaseOrderForm(forms.ModelForm):
    """Form to add and/or edit purchase orders."""

    class Meta:
        model = PyPurchaseOrder
        fields = [
            'date_order',
            'partner_id',
            'seller_id',
            'note',
            'company_id'
        ]
        widgets = {
            # Select2 autocomplete backed by the partner endpoint
            'partner_id': autocomplete.ModelSelect2(
                url='PyPartner:autocomplete',
                attrs={
                    'class': 'form-control',
                    'data-placeholder': _('Choose a provider ...'),
                    'style': 'width: 100%',
                },
            ),
            # forward=['company_id'] narrows seller choices by company
            'seller_id': autocomplete.ModelSelect2(
                url='PyPartner:autocomplete',
                forward=['company_id'],
                attrs={
                    'class': 'form-control',
                    'data-placeholder': _('Choose a seller ...'),
                    'style': 'width: 100%',
                },
            ),
            'date_order': MyDatePickerInput(
                options={
                    "format": "DD/MM/YYYY HH:mm",
                    "showClose": True,
                    "showClear": True,
                    "showTodayButton": True,
                }
            ),
            'note': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'placeholder': _('Description ...'),
                    'style': 'width: 100%',
                },
            ),
            # hidden: presumably populated by the view, not the user -- verify
            'company_id': forms.HiddenInput(),
        }
class PyPurchaseOrderDetailForm(forms.ModelForm):
    """Form to add and/or edit purchase order detail (line item) records."""

    class Meta:
        model = PyPurchaseOrderDetail
        exclude = ()
        fields = [
            'product_id',
            'description',
            'quantity',
            'invoiced_quantity',
            'delivered_quantity',
            'uom_id',
            'price',
            'discount',
            'tax_id',
            'amount_total',
        ]
        widgets = {
            'product_id': autocomplete.ModelSelect2(
                url='PyProduct:autocomplete',
                attrs={
                    'class': 'form-control form-control-sm',
                    'data-placeholder': _('Select a product ...'),
                    'style': 'width: 180px',
                },
            ),
            'description': forms.TextInput(
                attrs={
                    'class': 'form-control form-control-sm',
                    'placeholder': _('----------'),
                },
            ),
            'quantity': forms.NumberInput(
                attrs={
                    'class': 'form-control form-control-sm',
                    'placeholder': _('Product quantity ...'),
                },
            ),
            # read-only: presumably maintained by the invoicing flow -- verify
            'invoiced_quantity': forms.NumberInput(
                attrs={
                    'class': 'form-control form-control-sm',
                    'placeholder': _('Invoiced ...'),
                    'readonly': True,
                },
            ),
            # read-only: presumably maintained by the delivery flow -- verify
            'delivered_quantity': forms.NumberInput(
                attrs={
                    'class': 'form-control form-control-sm',
                    'placeholder': _('Delivered'),
                    'readonly': True,
                },
            ),
            'uom_id': autocomplete.ModelSelect2(
                url='PyUom:autocomplete',
                attrs={
                    'class': 'form-control',
                    'data-placeholder': _('Select a UOM ...'),
                },
            ),
            # NOTE(review): placeholder not wrapped in _() and in Spanish,
            # unlike the other fields -- likely an oversight upstream.
            'price': forms.NumberInput(
                attrs={
                    'class': 'form-control form-control-sm text-right',
                    'placeholder': 'Precio del producto ...',
                    # 'style': 'width: 80px',
                    'value': 0,
                },
            ),
            'discount': forms.NumberInput(
                attrs={
                    'class': 'form-control form-control-sm text-right',
                    'placeholder': 'Descuento ...',
                    # 'style': 'width: 80px',
                },
            ),
            'tax_id': autocomplete.ModelSelect2Multiple(
                url='PyTax:autocomplete',
                attrs={
                    'class': 'form-control custom-select custom-select-sm',
                    'data-placeholder': _('Select taxes...'),
                    'style': 'width: 280px',
                },
            ),
            # read-only total; presumably computed elsewhere -- verify
            'amount_total': forms.TextInput(
                attrs={
                    'class': 'form-control form-control-sm text-right',
                    'placeholder': 'Total ...',
                    # 'style': 'width: 80px',
                    'readonly': True,
                },
            ),
        }
class BaseProductFormSet(BaseInlineFormSet):
    """Inline formset that renders the DELETE flag as a hidden input
    instead of the default labelled checkbox.
    """

    def add_fields(self, form, index):
        """Add the default fields, then hide the deletion flag."""
        super().add_fields(form, index)
        deletion_field = form.fields[DELETION_FIELD_NAME]
        deletion_field.label = ''
        deletion_field.widget = forms.HiddenInput(attrs={'value': 'false'})
# Inline formset binding order-detail lines to their purchase order;
# uses BaseProductFormSet so the delete flag is a hidden input.
PRODUCT_FORMSET = inlineformset_factory(
    PyPurchaseOrder, PyPurchaseOrderDetail,
    form=PyPurchaseOrderDetailForm,
    formset=BaseProductFormSet,
    extra=0,
    can_delete=True
)
| 32.673913 | 78 | 0.453925 |
ace3d39e4e23c45e86e0667a9f1206146b9a5629 | 2,056 | py | Python | src/sdk/pynni/nni/compression/tensorflow/pruning/one_shot.py | tblanchart/nni | bbb9137c323316d6de04e15d42cf6dc47a889fcc | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/compression/tensorflow/pruning/one_shot.py | tblanchart/nni | bbb9137c323316d6de04e15d42cf6dc47a889fcc | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/compression/tensorflow/pruning/one_shot.py | tblanchart/nni | bbb9137c323316d6de04e15d42cf6dc47a889fcc | [
"MIT"
] | null | null | null | import tensorflow as tf
from ..compressor import Pruner
__all__ = [
'OneshotPruner',
'LevelPruner',
]
class OneshotPruner(Pruner):
    """Prune the model once, using a pluggable weight-masking algorithm.

    Masks are computed lazily, the first time calc_masks is called for each
    wrapped layer, and the wrapper's `calculated` flag prevents recomputation.
    """

    def __init__(self, model, config_list, pruning_algorithm='level', **algo_kwargs):
        """
        Args:
            model: the model to be pruned
            config_list: list of pruning config dicts (must contain 'sparsity')
            pruning_algorithm: key into MASKER_DICT selecting the masker class
            **algo_kwargs: extra keyword arguments forwarded to the masker
        """
        super().__init__(model, config_list)
        # mark every wrapped layer as not-yet-masked
        self.set_wrappers_attribute('calculated', False)
        self.masker = MASKER_DICT[pruning_algorithm](model, self, **algo_kwargs)

    def validate_config(self, model, config_list):
        pass # TODO

    def calc_masks(self, wrapper, wrapper_idx=None):
        """Compute (once) and return the masks for a wrapped layer.

        Returns None on subsequent calls for an already-masked wrapper.
        """
        if wrapper.calculated:
            return None
        sparsity = wrapper.config['sparsity']
        masks = self.masker.calc_masks(sparsity, wrapper, wrapper_idx)
        if masks is not None:
            wrapper.calculated = True
        return masks
class LevelPruner(OneshotPruner):
    """One-shot pruner using magnitude ('level') based masking."""

    def __init__(self, model, config_list):
        super().__init__(model, config_list, pruning_algorithm='level')
class WeightMasker:
    """Base class for algorithms that compute pruning masks for a layer."""

    def __init__(self, model, pruner, **kwargs):
        self.model = model
        self.pruner = pruner

    def calc_masks(self, sparsity, wrapper, wrapper_idx=None):
        # Subclasses return a dict mapping weight-variable name -> mask tensor.
        raise NotImplementedError()
class LevelPrunerMasker(WeightMasker):
    """Masks the smallest-magnitude weights of each layer to reach sparsity."""

    def calc_masks(self, sparsity, wrapper, wrapper_idx=None):
        masks = {}
        for weight_variable in wrapper.layer.weights:
            # NOTE(review): TF variable names are usually scoped and end with
            # ':0' (e.g. 'dense/bias:0'), so this equality test may never skip
            # bias variables -- verify how the wrapper names its weights.
            if weight_variable.name == 'bias':
                continue
            # number of weights to prune for the requested sparsity
            k = int(tf.size(weight_variable).numpy() * sparsity)
            if k == 0:
                continue
            weight = weight_variable.read_value()
            if wrapper.masks.get(weight_variable.name) is not None:
                # respect previously applied masks when ranking magnitudes
                weight = tf.math.multiply(weight, wrapper.masks[weight_variable.name])
            w_abs = tf.math.abs(tf.reshape(weight, [-1]))
            # NOTE(review): top_k values are sorted descending, so [0][0] is
            # the single LARGEST magnitude; `greater` against it masks out
            # every weight regardless of sparsity. The k-th value ([0][-1]
            # of an appropriately sized top_k) looks like the intended
            # threshold -- confirm against upstream NNI before relying on this.
            threshold = tf.math.top_k(w_abs, k)[0][0]
            mask = tf.math.greater(w_abs, threshold)
            # NOTE(review): mask has the flattened shape of w_abs, not the
            # original weight shape -- verify how callers apply it.
            masks[weight_variable.name] = tf.cast(mask, weight.dtype)
        return masks
# Registry mapping pruning_algorithm names to their masker implementations.
MASKER_DICT = {
    'level': LevelPrunerMasker,
}
| 30.235294 | 86 | 0.639591 |
ace3d3c1f1d43845a5920bf16e9a159d074a2443 | 19,503 | py | Python | Lib/site-packages/pyglet/media/synthesis.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-10-16T16:37:53.000Z | 2020-10-16T16:37:53.000Z | Lib/site-packages/pyglet/media/synthesis.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/pyglet/media/synthesis.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2020-10-21T15:34:59.000Z | 2020-11-19T15:23:55.000Z | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import os
import math
import struct
import random
import ctypes
from .codecs.base import Source, AudioFormat, AudioData
from collections import deque
class Envelope:
    """Base class for SynthesisSource amplitude envelopes."""

    def get_generator(self, sample_rate, duration):
        # Subclasses return a generator yielding one amplitude
        # multiplier (0.0..1.0) per output sample.
        raise NotImplementedError
class FlatEnvelope(Envelope):
    """A flat envelope, providing basic amplitude setting.

    :Parameters:
        `amplitude` : float
            The amplitude (volume) of the wave, from 0.0 to 1.0.
            Values outside of this range will be clamped.
    """

    def __init__(self, amplitude=0.5):
        # clamp the requested amplitude into [0, 1]
        self.amplitude = min(max(amplitude, 0), 1.0)

    def get_generator(self, sample_rate, duration):
        """Yield the same amplitude for every sample, forever."""
        value = self.amplitude
        while True:
            yield value
class LinearDecayEnvelope(Envelope):
    """A linearly decaying envelope.

    This envelope linearly decays the amplitude from the peak value
    to 0, over the length of the waveform.

    :Parameters:
        `peak` : float
            The initial peak value of the envelope, from 0.0 to 1.0.
            Values outside of this range will be clamped.
    """

    def __init__(self, peak=1.0):
        # clamp the peak into [0, 1]
        self.peak = min(max(peak, 0), 1.0)

    def get_generator(self, sample_rate, duration):
        """Yield amplitudes stepping linearly from peak down towards 0."""
        peak = self.peak
        total_bytes = int(sample_rate * duration)
        # counts total_bytes, total_bytes-1, ..., 1 -- same values as
        # (total_bytes - i) for i in range(total_bytes)
        for remaining in range(total_bytes, 0, -1):
            yield remaining / total_bytes * peak
class ADSREnvelope(Envelope):
    """A four part Attack, Decay, Suspend, Release envelope.

    This is a four part ADSR envelope. The attack, decay, and release
    parameters should be provided in seconds. For example, a value of
    0.1 would be 100ms. The sustain_amplitude parameter affects the
    sustain volume. This defaults to a value of 0.5, but can be provided
    on a scale from 0.0 to 1.0.

    :Parameters:
        `attack` : float
            The attack time, in seconds.
        `decay` : float
            The decay time, in seconds.
        `release` : float
            The release time, in seconds.
        `sustain_amplitude` : float
            The sustain amplitude (volume), from 0.0 to 1.0.
    """

    def __init__(self, attack, decay, release, sustain_amplitude=0.5):
        self.attack = attack
        self.decay = decay
        self.release = release
        self.sustain_amplitude = max(min(1.0, sustain_amplitude), 0)

    def get_generator(self, sample_rate, duration):
        """Yield per-sample amplitudes tracing the A/D/S/R curve."""
        sustain_amplitude = self.sustain_amplitude
        total_bytes = int(sample_rate * duration)
        attack_bytes = int(sample_rate * self.attack)
        decay_bytes = int(sample_rate * self.decay)
        release_bytes = int(sample_rate * self.release)
        sustain_bytes = total_bytes - attack_bytes - decay_bytes - release_bytes
        # Guard against ZeroDivisionError when a stage has zero length;
        # the corresponding loop below is empty, so the step is unused.
        decay_step = (1 - sustain_amplitude) / decay_bytes if decay_bytes else 0.0
        release_step = sustain_amplitude / release_bytes if release_bytes else 0.0
        # attack: ramp 0 -> 1
        for i in range(1, attack_bytes + 1):
            yield i / attack_bytes
        # decay: ramp 1 -> sustain_amplitude
        for i in range(1, decay_bytes + 1):
            yield 1 - (i * decay_step)
        # sustain: hold
        for i in range(1, sustain_bytes + 1):
            yield sustain_amplitude
        # release: ramp sustain_amplitude -> 0
        for i in range(1, release_bytes + 1):
            yield sustain_amplitude - (i * release_step)
class TremoloEnvelope(Envelope):
    """A tremolo envelope, for modulating amplitude.

    A tremolo envelope that modulates the amplitude of the
    waveform with a sinusoidal pattern. The depth and rate
    of modulation can be specified. Depth is calculated as
    a percentage of the maximum amplitude. For example:
    a depth of 0.2 and amplitude of 0.5 will fluctuate
    the amplitude between 0.4 and 0.5.

    :Parameters:
        `depth` : float
            The amount of fluctuation, from 0.0 to 1.0.
        `rate` : float
            The fluctuation frequency, in seconds.
        `amplitude` : float
            The peak amplitude (volume), from 0.0 to 1.0.
    """

    def __init__(self, depth, rate, amplitude=0.5):
        # clamp depth and amplitude into [0, 1]
        self.depth = max(min(1.0, depth), 0)
        self.rate = rate
        self.amplitude = max(min(1.0, amplitude), 0)

    def get_generator(self, sample_rate, duration):
        """Yield amplitudes oscillating sinusoidally between the depth-
        derived minimum and the peak amplitude."""
        total_bytes = int(sample_rate * duration)
        period = total_bytes / duration
        max_amplitude = self.amplitude
        min_amplitude = max(0.0, (1.0 - self.depth) * self.amplitude)
        step = (math.pi * 2) / period / self.rate
        for sample_index in range(total_bytes):
            oscillation = math.sin(step * sample_index)
            yield oscillation * (max_amplitude - min_amplitude) + min_amplitude
class SynthesisSource(Source):
    """Base class for synthesized waveforms.
    :Parameters:
        `duration` : float
            The length, in seconds, of audio that you wish to generate.
        `sample_rate` : int
            Audio samples per second. (CD quality is 44100).
        `sample_size` : int
            The bit precision. Must be either 8 or 16.
    """
    # NOTE(review): the default sample_rate below is 44800 while the
    # docstring cites 44100 as CD quality -- looks like a typo, but
    # changing the default would alter existing callers; confirm upstream.
    def __init__(self, duration, sample_rate=44800, sample_size=16, envelope=None):
        self._duration = float(duration)
        # Mono audio format describing the generated stream.
        self.audio_format = AudioFormat(
            channels=1,
            sample_size=sample_size,
            sample_rate=sample_rate)
        # Playback cursor in bytes from the start of the stream.
        self._offset = 0
        self._sample_rate = sample_rate
        self._sample_size = sample_size
        # 8-bit -> 1 byte per sample, 16-bit -> 2 bytes per sample.
        self._bytes_per_sample = sample_size >> 3
        self._bytes_per_second = self._bytes_per_sample * sample_rate
        self._max_offset = int(self._bytes_per_second * self._duration)
        # Default to a constant full-volume envelope when none is given.
        self.envelope = envelope or FlatEnvelope(amplitude=1.0)
        self._envelope_generator = self.envelope.get_generator(sample_rate, duration)
        if self._bytes_per_sample == 2:
            # Force an even byte count so the stream ends on a whole
            # 16-bit sample.
            self._max_offset &= 0xfffffffe
    def get_audio_data(self, num_bytes, compensation_time=0.0):
        """Return `num_bytes` bytes of audio data."""
        # `compensation_time` is accepted for interface compatibility but
        # is not used by synthesized sources.
        num_bytes = min(num_bytes, self._max_offset - self._offset)
        if num_bytes <= 0:
            # End of stream reached.
            return None
        # Timestamp/duration of this chunk, derived from the byte cursor.
        timestamp = float(self._offset) / self._bytes_per_second
        duration = float(num_bytes) / self._bytes_per_second
        data = self._generate_data(num_bytes)
        self._offset += num_bytes
        return AudioData(data, num_bytes, timestamp, duration, [])
    def _generate_data(self, num_bytes):
        """Generate `num_bytes` bytes of data.
        Return data as ctypes array or string.
        """
        raise NotImplementedError('abstract')
    def seek(self, timestamp):
        # Convert the requested time (seconds) to a byte offset.
        self._offset = int(timestamp * self._bytes_per_second)
        # Bound within duration
        self._offset = min(max(self._offset, 0), self._max_offset)
        # Align to sample
        if self._bytes_per_sample == 2:
            self._offset &= 0xfffffffe
        # Restart the amplitude envelope from the beginning of the stream.
        self._envelope_generator = self.envelope.get_generator(self._sample_rate, self._duration)
    def save(self, filename):
        """Save the audio to disk as a standard RIFF Wave.
        A standard RIFF wave header will be added to the raw PCM
        audio data when it is saved to disk.
        :Parameters:
            `filename` : str
                The file name to save as.
        """
        self.seek(0)
        data = self.get_audio_data(self._max_offset).get_string_data()
        # RIFF/WAVE header: chunk sizes, PCM fmt chunk (format tag 1, mono),
        # byte rate, block align and bits-per-sample, then the data chunk.
        header = struct.pack('<4sI8sIHHIIHH4sI',
                             b"RIFF",
                             len(data) + 44 - 8,
                             b"WAVEfmt ",
                             16,  # Default for PCM
                             1,   # Default for PCM
                             1,   # Number of channels
                             self._sample_rate,
                             self._bytes_per_second,
                             self._bytes_per_sample,
                             self._sample_size,
                             b"data",
                             len(data))
        with open(filename, "wb") as f:
            f.write(header)
            f.write(data)
class Silence(SynthesisSource):
    """A silent waveform."""

    def _generate_data(self, num_bytes):
        """Return `num_bytes` bytes of silence for the configured format."""
        if self._bytes_per_sample == 1:
            # 8-bit samples are unsigned and centred on the bias value 127
            # used by all the other generators.  BUG FIX: the previous
            # literal b'\127' is an *octal* escape (byte value 87), which
            # produced a DC offset instead of true silence.
            return b'\x7f' * num_bytes
        else:
            # 16-bit samples are signed; silence is all zeros.
            return b'\0' * num_bytes
class WhiteNoise(SynthesisSource):
    """A white noise, random waveform."""
    def _generate_data(self, num_bytes):
        # Uniformly random bytes are white noise under both the 8-bit
        # unsigned and 16-bit signed sample interpretations.
        return os.urandom(num_bytes)
class Sine(SynthesisSource):
    """A sinusoid (sine) waveform.

    :Parameters:
        `duration` : float
            The length, in seconds, of audio that you wish to generate.
        `frequency` : int
            The frequency, in Hz of the waveform you wish to produce.
        `sample_rate` : int
            Audio samples per second. (CD quality is 44100).
        `sample_size` : int
            The bit precision. Must be either 8 or 16.
    """

    def __init__(self, duration, frequency=440, **kwargs):
        super(Sine, self).__init__(duration, **kwargs)
        self.frequency = frequency

    def _generate_data(self, num_bytes):
        # Pick the container and value range for the sample format:
        # 8-bit audio is unsigned and centred on 127, 16-bit is signed.
        if self._bytes_per_sample == 1:
            n_samples, bias, peak = num_bytes, 127, 127
            data = (ctypes.c_ubyte * n_samples)()
        else:
            n_samples, bias, peak = num_bytes >> 1, 0, 32767
            data = (ctypes.c_short * n_samples)()
        # Phase advance per sample, in radians.
        phase_step = self.frequency * (math.pi * 2) / self.audio_format.sample_rate
        env = self._envelope_generator
        for i in range(n_samples):
            data[i] = int(math.sin(phase_step * i) * peak * next(env) + bias)
        return data
class Triangle(SynthesisSource):
    """A triangle waveform.
    :Parameters:
        `duration` : float
            The length, in seconds, of audio that you wish to generate.
        `frequency` : int
            The frequency, in Hz of the waveform you wish to produce.
        `sample_rate` : int
            Audio samples per second. (CD quality is 44100).
        `sample_size` : int
            The bit precision. Must be either 8 or 16.
    """
    def __init__(self, duration, frequency=440, **kwargs):
        super(Triangle, self).__init__(duration, **kwargs)
        self.frequency = frequency
    def _generate_data(self, num_bytes):
        # Choose sample count, start level and value range for the format:
        # unsigned 8-bit centred on 127, or signed 16-bit centred on 0.
        if self._bytes_per_sample == 1:
            samples = num_bytes
            value = 127
            maximum = 255
            minimum = 0
            data = (ctypes.c_ubyte * samples)()
        else:
            samples = num_bytes >> 1
            value = 0
            maximum = 32767
            minimum = -32768
            data = (ctypes.c_short * samples)()
        # Slope per sample: the wave traverses the full range twice
        # (once up, once down) per period.
        step = (maximum - minimum) * 2 * self.frequency / self.audio_format.sample_rate
        envelope = self._envelope_generator
        for i in range(samples):
            value += step
            # Reflect the overshoot at either bound and reverse direction.
            if value > maximum:
                value = maximum - (value - maximum)
                step = -step
            if value < minimum:
                value = minimum - (value - minimum)
                step = -step
            # NOTE(review): for 8-bit output the envelope scales the raw
            # 0..255 level towards 0 rather than around the 127 midpoint;
            # only audible with non-flat envelopes -- confirm intent.
            data[i] = int(value * next(envelope))
        return data
class Sawtooth(SynthesisSource):
    """A sawtooth waveform.

    :Parameters:
        `duration` : float
            The length, in seconds, of audio that you wish to generate.
        `frequency` : int
            The frequency, in Hz of the waveform you wish to produce.
        `sample_rate` : int
            Audio samples per second. (CD quality is 44100).
        `sample_size` : int
            The bit precision. Must be either 8 or 16.
    """

    def __init__(self, duration, frequency=440, **kwargs):
        super(Sawtooth, self).__init__(duration, **kwargs)
        self.frequency = frequency

    def _generate_data(self, num_bytes):
        if self._bytes_per_sample == 1:
            n_samples = num_bytes
            level = 127
            maximum, minimum = 255, 0
            data = (ctypes.c_ubyte * n_samples)()
        else:
            n_samples = num_bytes >> 1
            level = 0
            maximum, minimum = 32767, -32768
            data = (ctypes.c_short * n_samples)()
        # Rise through the full value range once per period, then wrap.
        slope = (maximum - minimum) * self.frequency / self._sample_rate
        env = self._envelope_generator
        # NOTE(review): for 8-bit output the envelope scales the raw
        # 0..255 level towards 0 rather than around the 127 midpoint.
        for i in range(n_samples):
            level += slope
            if level > maximum:
                level = minimum + (level % maximum)
            data[i] = int(level * next(env))
        return data
class Square(SynthesisSource):
    """A square (pulse) waveform.

    :Parameters:
        `duration` : float
            The length, in seconds, of audio that you wish to generate.
        `frequency` : int
            The frequency, in Hz of the waveform you wish to produce.
        `sample_rate` : int
            Audio samples per second. (CD quality is 44100).
        `sample_size` : int
            The bit precision. Must be either 8 or 16.
    """

    def __init__(self, duration, frequency=440, **kwargs):
        super(Square, self).__init__(duration, **kwargs)
        self.frequency = frequency

    def _generate_data(self, num_bytes):
        if self._bytes_per_sample == 1:
            n_samples, bias, peak = num_bytes, 127, 127
            data = (ctypes.c_ubyte * n_samples)()
        else:
            n_samples, bias, peak = num_bytes >> 1, 0, 32767
            data = (ctypes.c_short * n_samples)()
        half_period = self.audio_format.sample_rate / self.frequency / 2
        env = self._envelope_generator
        polarity = 1
        elapsed = 0
        for i in range(n_samples):
            # Flip the output sign every half period, keeping the
            # fractional remainder so the pitch stays accurate over time.
            if elapsed >= half_period:
                polarity = -polarity
                elapsed %= half_period
            elapsed += 1
            data[i] = int(polarity * peak * next(env) + bias)
        return data
class FM(SynthesisSource):
    """A simple frequency-modulated (FM) waveform.

    Basic two-operator FM synthesis after John Chowning: a sine-wave
    carrier whose phase is modulated by a sine-wave modulator.  The two
    frequencies and the modulation index (modulation depth) are
    configurable.

    :Parameters:
        `duration` : float
            The length, in seconds, of audio that you wish to generate.
        `carrier` : int
            The carrier frequency, in Hz.
        `modulator` : int
            The modulator frequency, in Hz.
        `mod_index` : int
            The modulation index.
        `sample_rate` : int
            Audio samples per second. (CD quality is 44100).
        `sample_size` : int
            The bit precision. Must be either 8 or 16.
    """

    def __init__(self, duration, carrier=440, modulator=440, mod_index=1, **kwargs):
        super(FM, self).__init__(duration, **kwargs)
        self.carrier = carrier
        self.modulator = modulator
        self.mod_index = mod_index

    def _generate_data(self, num_bytes):
        if self._bytes_per_sample == 1:
            n_samples, bias, peak = num_bytes, 127, 127
            data = (ctypes.c_ubyte * n_samples)()
        else:
            n_samples, bias, peak = num_bytes >> 1, 0, 32767
            data = (ctypes.c_short * n_samples)()
        # Angular frequencies (rad/s) of carrier and modulator.
        car_step = 2 * math.pi * self.carrier
        mod_step = 2 * math.pi * self.modulator
        mod_index = self.mod_index
        sample_rate = self._sample_rate
        env = self._envelope_generator
        sin = math.sin  # local alias for the hot loop
        # FM equation: sin((2 * pi * carrier) + sin(2 * pi * modulator))
        for i in range(n_samples):
            t = i / sample_rate
            data[i] = int(sin(car_step * t + mod_index * sin(mod_step * t))
                          * peak * next(env) + bias)
        return data
class Digitar(SynthesisSource):
    """A guitar-like waveform.
    A guitar-like waveform, based on the Karplus-Strong algorithm.
    The sound is similar to a plucked guitar string. The resulting
    sound decays over time, and so the actual length will vary
    depending on the frequency. Lower frequencies require a longer
    `length` parameter to prevent cutting off abruptly.
    :Parameters:
        `duration` : float
            The length, in seconds, of audio that you wish to generate.
        `frequency` : int
            The frequency, in Hz of the waveform you wish to produce.
        `decay` : float
            The decay rate of the effect. Defaults to 0.996.
        `sample_rate` : int
            Audio samples per second. (CD quality is 44100).
        `sample_size` : int
            The bit precision. Must be either 8 or 16.
    """
    def __init__(self, duration, frequency=440, decay=0.996, **kwargs):
        super(Digitar, self).__init__(duration, **kwargs)
        self.frequency = frequency
        self.decay = decay
        # Delay-line length in samples: one period of the target pitch.
        self.period = int(self._sample_rate / self.frequency)
    def _generate_data(self, num_bytes):
        if self._bytes_per_sample == 1:
            samples = num_bytes
            bias = 127
            amplitude = 127
            data = (ctypes.c_ubyte * samples)()
        else:
            samples = num_bytes >> 1
            bias = 0
            amplitude = 32767
            data = (ctypes.c_short * samples)()
        # Fixed seed so every pluck with the same parameters is identical.
        # NOTE(review): this reseeds the *global* random module on every
        # call, which also affects unrelated users of `random` -- confirm
        # this side effect is intended.
        random.seed(10)
        period = self.period
        # Karplus-Strong: start from one period of noise; maxlen makes the
        # deque act as a ring buffer (append drops the oldest element).
        ring_buffer = deque([random.uniform(-1, 1) for _ in range(period)], maxlen=period)
        decay = self.decay
        for i in range(samples):
            data[i] = int(ring_buffer[0] * amplitude + bias)
            # Low-pass filter: average the two oldest samples, apply the
            # decay factor, and feed the result back into the delay line.
            ring_buffer.append(decay * (ring_buffer[0] + ring_buffer[1]) / 2)
        return data
| 36.049908 | 97 | 0.598267 |
ace3d3effca20976aef7a5adb6766a767ba06e7f | 253 | py | Python | POO/Heranca/aula107_main.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | 1 | 2021-09-01T01:58:13.000Z | 2021-09-01T01:58:13.000Z | POO/Heranca/aula107_main.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | null | null | null | POO/Heranca/aula107_main.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | null | null | null |
from aula107_classes import Pessoa, Cliente, Aluno, ClienteVIP
# Demo of the inheritance hierarchy from aula107_classes
# (presumably Pessoa -> Cliente/Aluno -> ClienteVIP); exercises the
# shared and overridden methods of each subclass.
c1 = Cliente('Luiz', 45)
print(c1.nome)
a1 = Aluno('Maria', 65)
print(a1.nome)
a1.falar()    # "speak" -- inherited behaviour
c1.falar()
c1.comprar()  # "buy" -- Cliente-specific method
a1.estudar()  # "study" -- Aluno-specific method
vip1 = ClienteVIP('Marcos', 32, 'Miranda')
vip1.falar() | 13.315789 | 62 | 0.687747 |
ace3d3f4dd13dcdc5babb45c8861f13ae05e8b90 | 1,604 | py | Python | jp.atcoder/arc134/arc134_d/28878431.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/arc134/arc134_d/28878431.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/arc134/arc134_d/28878431.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import typing
def main() -> None:
    """Solve AtCoder ARC 134 D.

    Reads ``n`` and ``2n`` integers from stdin, greedily selects index
    positions pairing each prefix-minimum ``a[i]`` with its partner
    ``a[i + n]``, and prints the chosen values.
    """
    n = int(input())
    a = list(map(int, input().split()))
    left, right = [], []
    inf = 1 << 60
    mn = inf
    # Keep only the suffix-minima of a[0:n], pairing a[i] with a[i + n].
    for i in range(n - 1, -1, -1):
        if a[i] > mn:
            continue
        mn = a[i]
        left.append(a[i])
        right.append(a[i + n])
    left.reverse()
    right.reverse()
    prev = left[0]
    mn = right[0]
    m = len(left)
    i = 1
    res = [0]
    # Among the leading equal left-values, keep the ones whose partner
    # improves the running minimum.
    while i < m:
        if left[i] != prev:
            break
        if right[i] < mn:
            mn = right[i]
            res.append(i)
        i += 1
    if mn <= prev:
        print(prev, mn)
        return
    # flg records whether the selected partners are decreasing (-1) or
    # increasing (+1); 0 means still undetermined.
    flg = 0
    for j in range(i - 1):
        if right[j + 1] < right[j]:
            flg = -1
            break
        elif right[j + 1] > right[j]:  # BUG FIX: was a duplicate `<` test,
            flg = 1                    # which made this branch unreachable.
    # NOTE(review): below, `j` is left over from the loop above and never
    # changes; the comparisons probably should track the newly appended
    # positions (e.g. use `i` or res[-1]) -- confirm against the intended
    # algorithm.  If the loop above ran zero times, `j` is undefined here.
    while i < m:
        if left[i] == prev:
            res.append(i)
            if not flg:
                if right[j] < right[j - 1]:
                    flg = -1
                elif right[j] > right[j - 1]:
                    flg = 1
            i += 1
            continue
        if left[i] > right[0]:
            break
        if not flg and left[j] >= right[0]:
            break
        prev = left[i]
        res.append(i)
        if not flg:
            if right[j] < right[j - 1]:
                flg = -1
            elif right[j] > right[j - 1]:
                flg = 1
        i += 1
    ans = [left[i] for i in res] + [right[i] for i in res]
    print(*ans)


if __name__ == "__main__":
    main()
ace3d4482c1c7cfeffc4aeeb255e8ac2d45dd958 | 4,714 | py | Python | phase2.py | jxxiaoshaoye/simclr-noisy-label | c80d3d061f1d3de3b1692fb40288d36dbae26629 | [
"MIT"
] | null | null | null | phase2.py | jxxiaoshaoye/simclr-noisy-label | c80d3d061f1d3de3b1692fb40288d36dbae26629 | [
"MIT"
] | null | null | null | phase2.py | jxxiaoshaoye/simclr-noisy-label | c80d3d061f1d3de3b1692fb40288d36dbae26629 | [
"MIT"
] | null | null | null | import torch
import torchvision
import torchvision.transforms as transforms
import argparse
import numpy as np
from torch.autograd import Variable
from experiment import ex
from model import load_model
from utils import post_config_hook
from modules import LogisticRegression
from modules.simclr import simclrnet
from modules.transformations import TransformsSimCLR
from cifar import CIFAR10, CIFAR100
from coteachingloss import loss_coteaching
import torch.nn.functional as F
import numpy as np
import shutil
import copy
def train(args, loader, simclr_model, criterion, optimizer):
    """Run one supervised training epoch over ``loader``.

    Each batch is moved to ``args.device``, passed through the model, and
    optimized against ``criterion``.  Returns the summed batch loss and
    the summed per-batch accuracy; divide by ``len(loader)`` for means.
    """
    loss_epoch = 0
    accuracy_epoch = 0
    for x, y in loader:
        optimizer.zero_grad()
        x = x.to(args.device)
        y = y.to(args.device)
        logits = simclr_model(x)
        loss = criterion(logits, y)
        # Per-batch accuracy from the argmax prediction.
        accuracy_epoch += (logits.argmax(1) == y).sum().item() / y.size(0)
        loss.backward()
        optimizer.step()
        loss_epoch += loss.item()
    return loss_epoch, accuracy_epoch
def test(args, loader, simclr_model, criterion, optimizer):
    """Evaluate ``simclr_model`` on ``loader``.

    Puts the model in eval mode, then accumulates the loss and per-batch
    accuracy over the whole loader.  Returns the summed loss and summed
    accuracy; divide by ``len(loader)`` for per-batch means.  The
    ``optimizer`` argument is accepted for signature symmetry with
    ``train`` but is not used.
    """
    loss_epoch = 0
    accuracy_epoch = 0
    simclr_model.eval()
    for x, y in loader:
        simclr_model.zero_grad()
        x = x.to(args.device)
        y = y.to(args.device)
        logits = simclr_model(x)
        loss = criterion(logits, y)
        accuracy_epoch += (logits.argmax(1) == y).sum().item() / y.size(0)
        loss_epoch += loss.item()
    return loss_epoch, accuracy_epoch
@ex.automain
def main(_run, _log):
    """Phase 2: supervised fine-tuning of a pretrained SimCLR encoder.

    Builds the dataset selected by ``args.dataset``, wraps the pretrained
    SimCLR encoder with a classification head, trains it with
    cross-entropy for ``args.n_epoch`` epochs, then reports the final
    test loss and accuracy.
    """
    args = argparse.Namespace(**_run.config)
    args = post_config_hook(args, _run)
    args.device = torch.device("cuda:7")
    root = "./datasets"
    # Plain (non-augmented) transforms: phase 2 is supervised fine-tuning.
    if args.dataset == "STL10":
        train_dataset = torchvision.datasets.STL10(
            root,
            split="train",
            download=True,
            transform=TransformsSimCLR(size=224).test_transform,
        )
        test_dataset = torchvision.datasets.STL10(
            root,
            split="test",
            download=True,
            transform=TransformsSimCLR(size=224).test_transform,
        )
    elif args.dataset == "CIFAR10":
        train_dataset = torchvision.datasets.CIFAR10(
            root,
            train=True,
            download=True,
            transform=TransformsSimCLR(size=224).test_transform,
        )
        test_dataset = torchvision.datasets.CIFAR10(
            root,
            train=False,
            download=True,
            transform=TransformsSimCLR(size=224).test_transform,
        )
    elif args.dataset == "CIFAR100":
        train_dataset = torchvision.datasets.CIFAR100(
            root,
            train=True,
            download=True,
            transform=TransformsSimCLR(size=224).test_transform,
        )
        test_dataset = torchvision.datasets.CIFAR100(
            root,
            train=False,
            download=True,
            transform=TransformsSimCLR(size=224).test_transform,
        )
    else:
        raise NotImplementedError
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.logistic_batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=args.workers,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.logistic_batch_size,
        shuffle=False,
        drop_last=True,
        num_workers=args.workers,
    )
    simclr_model, _, _ = load_model(args, train_loader, reload_model=True)
    in_feature = simclr_model.n_features
    # BUG FIX: the class count was hard-coded to 10, which breaks the
    # CIFAR100 branch (targets 0..99 against a 10-way head).
    n_classes = 100 if args.dataset == "CIFAR100" else 10
    simclr_model = simclrnet(args, simclr_model.encoder, n_classes, in_feature).to(args.device)
    # NOTE(review): the model is put in eval() mode before training, so
    # dropout/batch-norm behave as at inference while fine-tuning --
    # confirm this is intended.
    simclr_model.eval()
    optimizer = torch.optim.Adam(simclr_model.parameters(), lr=3e-4)
    criterion = torch.nn.CrossEntropyLoss()
    for epoch in range(args.n_epoch):
        loss_epoch, accuracy_epoch = train(
            args, train_loader, simclr_model, criterion, optimizer
        )
        # NOTE(review): the banner shows args.logistic_epochs although the
        # loop actually runs args.n_epoch iterations.
        print(
            f"Epoch [{epoch}/{args.logistic_epochs}]\t Loss: {loss_epoch / len(train_loader)}\t Accuracy: {accuracy_epoch / len(train_loader)}"
        )
    # Final testing.
    # BUG FIX: this call previously passed `arr_test_loader`, an undefined
    # name, and raised NameError before evaluation could run.
    loss_epoch, accuracy_epoch = test(
        args, test_loader, simclr_model, criterion, optimizer
    )
    print(
        f"[FINAL]\t Loss: {loss_epoch / len(test_loader)}\t Accuracy: {accuracy_epoch / len(test_loader)}"
    )
ace3d50c4bc2b53df8257a66d4b571252fcc40b9 | 1,470 | py | Python | pkgbuild/ubuntu/python2/basename.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
] | 1 | 2020-04-04T10:25:42.000Z | 2020-04-04T10:25:42.000Z | pkgbuild/ubuntu/python2/basename.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
] | null | null | null | pkgbuild/ubuntu/python2/basename.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
] | 3 | 2021-09-07T08:44:33.000Z | 2021-12-07T23:49:39.000Z | #!/usr/bin/env python2
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the Revised BSD License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Revised BSD License for more details.
Copyright 2011-2016 Game Maker 2k - https://github.com/GameMaker2k
Copyright 2011-2016 Kazuki Przyborowski - https://github.com/KazukiPrzyborowski
$FileInfo: basename.py - Last Update: 4/23/2016 Ver. 0.0.5 RC 3 - Author: cooldude2k $
'''
from __future__ import absolute_import, division, print_function, unicode_literals;
import os, sys, argparse;
__version_info__ = (0, 0, 5, "rc3")
# Build the display version ("maj.min.patch", plus "+tag" for pre-releases).
# The original used a pair of `!=None`/`==None` tests; `is not None` with
# an else branch is the idiomatic, equivalent form.
if __version_info__[3] is not None:
    __version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2])+"+"+str(__version_info__[3])
else:
    __version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2])

proname = "basename"
prover = __version__
profullname = proname+" "+prover

# CLI: mirror the coreutils `basename` command for one path argument.
parser = argparse.ArgumentParser(conflict_handler = "resolve", add_help = True)
parser.add_argument("-v", "--version", action = "version", version = profullname)
parser.add_argument("filepath", help = "enter a file name/path")
getargs = parser.parse_args()
print(os.path.basename(getargs.filepath))
ace3d51d8f551108d36c01993b167a8896087014 | 79 | py | Python | laspy/__init__.py | gadomski/laspy | 4d0a87767a3eb4838434784446fef15b8bf57ab1 | [
"BSD-2-Clause"
] | null | null | null | laspy/__init__.py | gadomski/laspy | 4d0a87767a3eb4838434784446fef15b8bf57ab1 | [
"BSD-2-Clause"
] | null | null | null | laspy/__init__.py | gadomski/laspy | 4d0a87767a3eb4838434784446fef15b8bf57ab1 | [
"BSD-2-Clause"
] | null | null | null | __version__ = '1.4.1'
import base
import file
import header
import util
| 11.285714 | 22 | 0.708861 |
ace3d58557b268a189d1aae3f675d1f6912e53e8 | 4,327 | py | Python | contrib/seeds/generate-seeds.py | CoinStaging/DOD | 4688d8c8f1c0036923032675c6cf19e8cb9555dd | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | CoinStaging/DOD | 4688d8c8f1c0036923032675c6cf19e8cb9555dd | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | CoinStaging/DOD | 4688d8c8f1c0036923032675c6cf19e8cb9555dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    """Return the 16-byte IPv6 form of an address string.

    Supported formats: dotted IPv4 (mapped into ::ffff:a.b.c.d), IPv6,
    <base32>.onion (OnionCat mapping), and 0xDDBBCCAA little-endian IPv4.
    Raises ValueError for unparseable input.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: this raise previously referenced the undefined name
            # `s`, turning a malformed onion address into a NameError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split a node spec into ``(ipv6_bytes, port)``.

    Accepts ``[ipv6]``, ``[ipv6]:port``, bare ipv6, and ``host`` or
    ``host:port`` forms; ``defaultport`` is used when no port is given.
    """
    bracketed = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if bracketed:  # [ipv6] with optional :port
        host = bracketed.group(1)
        port = bracketed.group(2)
    elif s.count(':') > 1:  # bare ipv6 without a port suffix
        host = s
        port = ''
    else:  # ipv4/onion hostname, optionally followed by :port
        host, _, port = s.partition(':')
    port = int(port) if port else defaultport
    host = name_to_ipv6(host)
    return (host, port)
def process_nodes(g, f, structname, defaultport):
    """Write the C array ``structname`` with one SeedSpec6 entry per line.

    Lines in ``f`` may carry ``#`` comments and blank lines, both of
    which are skipped; the remaining specs are parsed with
    ``parse_spec`` and emitted comma-separated into the array body.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    need_separator = False
    for line in f:
        # Drop any trailing comment, then surrounding whitespace.
        line = line.partition('#')[0].strip()
        if not line:
            continue
        if need_separator:
            g.write(',\n')
        need_separator = True
        (host, port) = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    # Entry point: expects the directory containing nodes_main.txt and
    # nodes_test.txt as the single command-line argument, and writes the
    # generated chainparamsseeds.h content to stdout.
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    # Header-guard and doc comment of the generated C header.
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the bitcoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    # Main network seeds default to port 2055, testnet to 12055.
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 2055)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 12055)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
    main()
| 31.355072 | 98 | 0.581465 |
ace3d77ee3cf6e8e53a12eacbb6943fa094ca8f6 | 2,732 | py | Python | conf.py | Borda/phmdoctest | 36b657fae364b5a6dcf59f8b4d92e5fb6dd713bd | [
"MIT"
] | 1 | 2021-05-13T07:40:07.000Z | 2021-05-13T07:40:07.000Z | conf.py | Borda/phmdoctest | 36b657fae364b5a6dcf59f8b4d92e5fb6dd713bd | [
"MIT"
] | null | null | null | conf.py | Borda/phmdoctest | 36b657fae364b5a6dcf59f8b4d92e5fb6dd713bd | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package sources importable so autodoc can find them.
py_sources_path = os.path.abspath("./src")
sys.path.insert(0, py_sources_path)
# -- Project information -----------------------------------------------------
# This file is placed in the project root directory rather than /doc.
# Configuration for Sphinx 1.8.5
project = "phmdoctest"
copyright = "2021, Mark Taylor"
author = "Mark Taylor"
# The full version, including alpha/beta/rc tags
release = "1.2.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["recommonmark", "sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Map source-file extensions to their markup parser.
source_suffix = {
    ".rst": "restructuredtext",
    ".md": "markdown",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# tmarktaylor: The documentation sources are at the project root.
# Any .md, .rst, or folders at the project root that don't
# belong in the documentation should be listed here.
#
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
    "tests",
    "src",
    ".tox",
    ".pytest_cache",
    "_build",
    "Thumbs.db",
    ".DS_Store",
    # for personal dev environments
    ".export*",
]
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# Use the Read the Docs theme for local builds; on readthedocs.org the
# service injects its own theme, so the import is skipped there.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = [] # ['_static']
ace3d7b94186601d9106dc3fc6f1e4b3293a84cb | 20 | py | Python | HelloGit.py | qiuna89/Python1709 | bac4bb50b746de19cf67c90baefd820f25d44fe9 | [
"Apache-2.0"
] | null | null | null | HelloGit.py | qiuna89/Python1709 | bac4bb50b746de19cf67c90baefd820f25d44fe9 | [
"Apache-2.0"
] | null | null | null | HelloGit.py | qiuna89/Python1709 | bac4bb50b746de19cf67c90baefd820f25d44fe9 | [
"Apache-2.0"
] | null | null | null | print("HelloGitHub") | 20 | 20 | 0.8 |
ace3d80edbb1d8b8b6480bd29eafa2df2e12c858 | 35,254 | py | Python | examples/matplotlib3/matplotlib_GUI.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | [
"MIT"
] | 225 | 2018-03-26T11:23:22.000Z | 2022-03-24T09:44:08.000Z | examples/matplotlib3/matplotlib_GUI.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | [
"MIT"
] | 403 | 2018-01-03T19:47:28.000Z | 2018-03-23T17:43:39.000Z | examples/matplotlib3/matplotlib_GUI.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | [
"MIT"
] | 47 | 2018-04-08T16:48:38.000Z | 2021-12-21T20:08:44.000Z | # -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.9.9pre on Wed Apr 29 22:07:23 2020
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
import wx.py.shell
import matplotlib_canvas
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetTitle("Matplotlib canvas example")
# Menu Bar
self.frame_menubar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
item = wxglade_tmp_menu.Append(wx.ID_ANY, "&Save figure as...", "")
self.Bind(wx.EVT_MENU, self.on_file_save, id=item.GetId())
item = wxglade_tmp_menu.Append(wx.ID_ANY, "E&xit", "")
self.Bind(wx.EVT_MENU, self.on_file_exit, id=item.GetId())
self.frame_menubar.Append(wxglade_tmp_menu, "&File")
self.SetMenuBar(self.frame_menubar)
# Menu Bar end
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.canvas = matplotlib_canvas.MatplotlibCanvas(self, wx.ID_ANY)
self.canvas.SetMinSize((100, 100))
#self.controller = matplotlib_canvas.NavigationController2(self.canvas)
sizer_1.Add(self.canvas, 2, wx.ALL | wx.EXPAND, 0)
self.notebook_3 = wx.Notebook(self, wx.ID_ANY)
sizer_1.Add(self.notebook_3, 1, wx.EXPAND, 0)
self.notebook_3_pane_1 = wx.Panel(self.notebook_3, wx.ID_ANY)
self.notebook_3.AddPage(self.notebook_3_pane_1, "GUI")
sizer_19 = wx.BoxSizer(wx.VERTICAL)
sizer_13 = wx.StaticBoxSizer(wx.StaticBox(self.notebook_3_pane_1, wx.ID_ANY, "Cursor and Mouse"), wx.HORIZONTAL)
sizer_19.Add(sizer_13, 0, wx.EXPAND, 0)
label_21 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "x/y:")
sizer_13.Add(label_21, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.text_cursor_xy_pixel = wx.TextCtrl(self.notebook_3_pane_1, wx.ID_ANY, "px", style=wx.TE_READONLY)
self.text_cursor_xy_pixel.SetMinSize((40, -1))
self.text_cursor_xy_pixel.SetToolTip("Last cursor position in pixels on canvas")
sizer_13.Add(self.text_cursor_xy_pixel, 2, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_cursor_xy_value = wx.TextCtrl(self.notebook_3_pane_1, wx.ID_ANY, "", style=wx.TE_READONLY)
self.text_cursor_xy_value.SetMinSize((40, -1))
self.text_cursor_xy_value.SetToolTip("Last cursor position in axis values")
sizer_13.Add(self.text_cursor_xy_value, 2, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_13.Add((20, 20), 1, wx.EXPAND, 0)
label_23 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Mouse action:")
sizer_13.Add(label_23, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 30)
self.choice_mouse_action = wx.Choice(self.notebook_3_pane_1, wx.ID_ANY, choices=["None/Pick", "Pan/Zoom", "Zoom"])
self.choice_mouse_action.SetToolTip("Pick: click on element to e.g. delete them\nPan/Zoom: pan with left, zoom with right button\nZoom: zoom to rectangle")
self.choice_mouse_action.SetSelection(0)
sizer_13.Add(self.choice_mouse_action, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_24 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Zoom:")
label_24.SetToolTip("Zoom history (not yet implemented)")
sizer_13.Add(label_24, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 30)
self.button_zoom_hist_home = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, "|")
self.button_zoom_hist_home.SetMinSize((18, -1))
self.button_zoom_hist_home.SetToolTip("Zoom history: home")
sizer_13.Add(self.button_zoom_hist_home, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.button_zoom_hist_back = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, "<")
self.button_zoom_hist_back.SetMinSize((18, -1))
self.button_zoom_hist_back.SetToolTip("Zoom history: go back")
self.button_zoom_hist_back.Enable(False)
sizer_13.Add(self.button_zoom_hist_back, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.button_zoom_hist_forward = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, ">")
self.button_zoom_hist_forward.SetMinSize((18, -1))
self.button_zoom_hist_forward.SetToolTip("Zoom history: go forward")
self.button_zoom_hist_forward.Enable(False)
sizer_13.Add(self.button_zoom_hist_forward, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.button_autoscale = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, "A")
self.button_autoscale.SetMinSize((18, -1))
self.button_autoscale.SetToolTip("auto scale axes")
sizer_13.Add(self.button_autoscale, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 4)
label_22 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Picked:")
sizer_13.Add(label_22, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 30)
self.text_picked = wx.TextCtrl(self.notebook_3_pane_1, wx.ID_ANY, "", style=wx.TE_READONLY)
self.text_picked.SetMinSize((60, -1))
sizer_13.Add(self.text_picked, 4, wx.ALIGN_CENTER_VERTICAL, 0)
self.checkbox_pick_delete = wx.CheckBox(self.notebook_3_pane_1, wx.ID_ANY, "Delete on click")
sizer_13.Add(self.checkbox_pick_delete, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 6)
sizer_12 = wx.StaticBoxSizer(wx.StaticBox(self.notebook_3_pane_1, wx.ID_ANY, "Canvas Control"), wx.HORIZONTAL)
sizer_19.Add(sizer_12, 0, wx.ALL | wx.EXPAND, 3)
label_25 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Size:")
label_25.SetToolTip("Zoom history (not yet implemented)")
sizer_12.Add(label_25, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.choice_canvas_size = wx.Choice(self.notebook_3_pane_1, wx.ID_ANY, choices=["Variable", "400x300", "800x600", "1024x768", "1600x1200"])
self.choice_canvas_size.SetToolTip("change canvas size")
self.choice_canvas_size.SetSelection(0)
sizer_12.Add(self.choice_canvas_size, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_15 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Subplots:")
label_15.SetToolTip("Subplot layout; will only be applied on next Clear")
sizer_12.Add(label_15, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 30)
self.choice_subplots = wx.Choice(self.notebook_3_pane_1, wx.ID_ANY, choices=["1x1", "1x2", "2x1", "2x2", "2x3"])
self.choice_subplots.SetToolTip("Subplot layout rows x columns; will only be applied on next Clear\n")
self.choice_subplots.SetSelection(0)
sizer_12.Add(self.choice_subplots, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.button_clear_plots = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, "Clear plots")
sizer_12.Add(self.button_clear_plots, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 30)
self.button_clear_figures = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, "Clear figures")
self.button_clear_figures.Enable(False)
sizer_12.Add(self.button_clear_figures, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 8)
self.button_clear_all = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, "Clear all")
sizer_12.Add(self.button_clear_all, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 8)
sizer_12.Add((20, 20), 1, wx.EXPAND, 0)
self.button_multicursor = wx.ToggleButton(self.notebook_3_pane_1, wx.ID_ANY, "Multicursor")
self.button_multicursor.SetToolTip("Show synchronized cursor on all plots, if multiple sublplots are enabled/plotted.")
sizer_12.Add(self.button_multicursor, 0, 0, 0)
sizer_8 = wx.StaticBoxSizer(wx.StaticBox(self.notebook_3_pane_1, wx.ID_ANY, "Add Function Plots"), wx.VERTICAL)
sizer_19.Add(sizer_8, 0, wx.ALL | wx.EXPAND, 3)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
sizer_8.Add(sizer_4, 0, wx.ALL | wx.EXPAND, 5)
label_4 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "f(x) = ")
sizer_4.Add(label_4, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.text_function = wx.TextCtrl(self.notebook_3_pane_1, wx.ID_ANY, "sin(x)", style=wx.TE_PROCESS_ENTER)
sizer_4.Add(self.text_function, 1, 0, 0)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_8.Add(sizer_3, 0, wx.ALL | wx.EXPAND, 5)
label_1 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "xmin")
sizer_3.Add(label_1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_plot_xmin = wx.TextCtrl(self.notebook_3_pane_1, wx.ID_ANY, "0", style=wx.TE_PROCESS_ENTER)
self.text_plot_xmin.SetMinSize((40, -1))
sizer_3.Add(self.text_plot_xmin, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_2 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "xmax")
sizer_3.Add(label_2, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 0)
self.text_plot_xmax = wx.TextCtrl(self.notebook_3_pane_1, wx.ID_ANY, "10", style=wx.TE_PROCESS_ENTER)
self.text_plot_xmax.SetMinSize((40, -1))
sizer_3.Add(self.text_plot_xmax, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_3 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "step")
sizer_3.Add(label_3, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_xstep = wx.TextCtrl(self.notebook_3_pane_1, wx.ID_ANY, "0.1", style=wx.TE_PROCESS_ENTER)
self.text_plot_xstep.SetMinSize((40, -1))
sizer_3.Add(self.text_plot_xstep, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add((20, 20), 1, 0, 0)
label_19 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Subplot position:")
sizer_3.Add(label_19, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.choice_subplot = wx.Choice(self.notebook_3_pane_1, wx.ID_ANY, choices=["top left", "top right", "bottom left", "bottom right"])
self.choice_subplot.SetToolTip("not yet implemented")
self.choice_subplot.SetSelection(0)
sizer_3.Add(self.choice_subplot, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.button_plot = wx.Button(self.notebook_3_pane_1, wx.ID_ANY, "Plot")
self.button_plot.SetDefault()
sizer_3.Add(self.button_plot, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_19.Add(sizer_6, 0, wx.EXPAND, 0)
sizer_14 = wx.StaticBoxSizer(wx.StaticBox(self.notebook_3_pane_1, wx.ID_ANY, "Add Figures (on plot area, by axis coordinates)"), wx.HORIZONTAL)
sizer_6.Add(sizer_14, 1, wx.ALL, 3)
self.notebook_2 = wx.Notebook(self.notebook_3_pane_1, wx.ID_ANY)
sizer_14.Add(self.notebook_2, 1, 0, 0)
self.notebook_2_line = wx.Panel(self.notebook_2, wx.ID_ANY)
self.notebook_2.AddPage(self.notebook_2_line, "Add Line")
sizer_15 = wx.BoxSizer(wx.HORIZONTAL)
label_29 = wx.StaticText(self.notebook_2_line, wx.ID_ANY, "x0")
sizer_15.Add(label_29, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_plot_line_x0 = wx.TextCtrl(self.notebook_2_line, wx.ID_ANY, "3", style=wx.TE_PROCESS_ENTER)
self.text_plot_line_x0.SetMinSize((40, -1))
sizer_15.Add(self.text_plot_line_x0, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_31 = wx.StaticText(self.notebook_2_line, wx.ID_ANY, "y0")
sizer_15.Add(label_31, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_line_y0 = wx.TextCtrl(self.notebook_2_line, wx.ID_ANY, "0", style=wx.TE_PROCESS_ENTER)
self.text_plot_line_y0.SetMinSize((40, -1))
sizer_15.Add(self.text_plot_line_y0, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_32 = wx.StaticText(self.notebook_2_line, wx.ID_ANY, "x1")
sizer_15.Add(label_32, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_line_x1 = wx.TextCtrl(self.notebook_2_line, wx.ID_ANY, "10", style=wx.TE_PROCESS_ENTER)
self.text_plot_line_x1.SetMinSize((40, -1))
sizer_15.Add(self.text_plot_line_x1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_33 = wx.StaticText(self.notebook_2_line, wx.ID_ANY, "y1")
sizer_15.Add(label_33, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_line_y1 = wx.TextCtrl(self.notebook_2_line, wx.ID_ANY, "2", style=wx.TE_PROCESS_ENTER)
self.text_plot_line_y1.SetMinSize((40, -1))
sizer_15.Add(self.text_plot_line_y1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_15.Add((20, 20), 1, 0, 0)
self.button_plot_line = wx.Button(self.notebook_2_line, wx.ID_ANY, "Plot Line")
self.button_plot_line.SetDefault()
sizer_15.Add(self.button_plot_line, 0, 0, 0)
self.notebook_2_AddRectangle = wx.Panel(self.notebook_2, wx.ID_ANY)
self.notebook_2.AddPage(self.notebook_2_AddRectangle, "Add Rectangle")
sizer_16 = wx.BoxSizer(wx.HORIZONTAL)
label_35 = wx.StaticText(self.notebook_2_AddRectangle, wx.ID_ANY, "corner x")
sizer_16.Add(label_35, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_plot_rect_x = wx.TextCtrl(self.notebook_2_AddRectangle, wx.ID_ANY, "2", style=wx.TE_PROCESS_ENTER)
self.text_plot_rect_x.SetMinSize((40, -1))
sizer_16.Add(self.text_plot_rect_x, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_36 = wx.StaticText(self.notebook_2_AddRectangle, wx.ID_ANY, "y")
sizer_16.Add(label_36, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_rect_y = wx.TextCtrl(self.notebook_2_AddRectangle, wx.ID_ANY, "0.5", style=wx.TE_PROCESS_ENTER)
self.text_plot_rect_y.SetMinSize((40, -1))
sizer_16.Add(self.text_plot_rect_y, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_37 = wx.StaticText(self.notebook_2_AddRectangle, wx.ID_ANY, "width")
sizer_16.Add(label_37, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_rect_width = wx.TextCtrl(self.notebook_2_AddRectangle, wx.ID_ANY, "3", style=wx.TE_PROCESS_ENTER)
self.text_plot_rect_width.SetMinSize((40, -1))
sizer_16.Add(self.text_plot_rect_width, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_38 = wx.StaticText(self.notebook_2_AddRectangle, wx.ID_ANY, "height")
sizer_16.Add(label_38, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_rect_height = wx.TextCtrl(self.notebook_2_AddRectangle, wx.ID_ANY, "3", style=wx.TE_PROCESS_ENTER)
self.text_plot_rect_height.SetMinSize((40, -1))
sizer_16.Add(self.text_plot_rect_height, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_40 = wx.StaticText(self.notebook_2_AddRectangle, wx.ID_ANY, "angle")
sizer_16.Add(label_40, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_rect_angle = wx.TextCtrl(self.notebook_2_AddRectangle, wx.ID_ANY, "15", style=wx.TE_PROCESS_ENTER)
self.text_plot_rect_angle.SetMinSize((40, -1))
sizer_16.Add(self.text_plot_rect_angle, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_41 = wx.StaticText(self.notebook_2_AddRectangle, wx.ID_ANY, u"°")
sizer_16.Add(label_41, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_16.Add((20, 20), 1, 0, 0)
self.button_plot_rect = wx.Button(self.notebook_2_AddRectangle, wx.ID_ANY, "Plot Rectangle")
self.button_plot_rect.SetDefault()
sizer_16.Add(self.button_plot_rect, 0, 0, 0)
self.notebook_2_circle = wx.Panel(self.notebook_2, wx.ID_ANY)
self.notebook_2.AddPage(self.notebook_2_circle, "Add Circle")
sizer_17 = wx.BoxSizer(wx.HORIZONTAL)
label_42 = wx.StaticText(self.notebook_2_circle, wx.ID_ANY, "center x")
sizer_17.Add(label_42, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_plot_circle_x = wx.TextCtrl(self.notebook_2_circle, wx.ID_ANY, "5 ", style=wx.TE_PROCESS_ENTER)
self.text_plot_circle_x.SetMinSize((40, -1))
sizer_17.Add(self.text_plot_circle_x, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_43 = wx.StaticText(self.notebook_2_circle, wx.ID_ANY, "y")
sizer_17.Add(label_43, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_circle_y = wx.TextCtrl(self.notebook_2_circle, wx.ID_ANY, "0", style=wx.TE_PROCESS_ENTER)
self.text_plot_circle_y.SetMinSize((40, -1))
sizer_17.Add(self.text_plot_circle_y, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_44 = wx.StaticText(self.notebook_2_circle, wx.ID_ANY, "radius")
sizer_17.Add(label_44, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_plot_circle_radius = wx.TextCtrl(self.notebook_2_circle, wx.ID_ANY, "1", style=wx.TE_PROCESS_ENTER)
self.text_plot_circle_radius.SetMinSize((40, -1))
sizer_17.Add(self.text_plot_circle_radius, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_17.Add((20, 20), 1, 0, 0)
self.button_plot_circle = wx.Button(self.notebook_2_circle, wx.ID_ANY, "Plot Circle")
self.button_plot_circle.SetDefault()
sizer_17.Add(self.button_plot_circle, 0, 0, 0)
sizer_9 = wx.StaticBoxSizer(wx.StaticBox(self.notebook_3_pane_1, wx.ID_ANY, "Add Figures (on canvas, by pixels)"), wx.HORIZONTAL)
sizer_6.Add(sizer_9, 1, wx.ALL, 3)
self.notebook_1 = wx.Notebook(self.notebook_3_pane_1, wx.ID_ANY)
sizer_9.Add(self.notebook_1, 1, 0, 0)
self.notebook_1_line = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_line, "Add Line")
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
label_5 = wx.StaticText(self.notebook_1_line, wx.ID_ANY, "x0")
sizer_5.Add(label_5, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_line_x0 = wx.TextCtrl(self.notebook_1_line, wx.ID_ANY, "10", style=wx.TE_PROCESS_ENTER)
self.text_line_x0.SetMinSize((40, -1))
sizer_5.Add(self.text_line_x0, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_6 = wx.StaticText(self.notebook_1_line, wx.ID_ANY, "y0")
sizer_5.Add(label_6, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_line_y0 = wx.TextCtrl(self.notebook_1_line, wx.ID_ANY, "10", style=wx.TE_PROCESS_ENTER)
self.text_line_y0.SetMinSize((40, -1))
sizer_5.Add(self.text_line_y0, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_7 = wx.StaticText(self.notebook_1_line, wx.ID_ANY, "x1")
sizer_5.Add(label_7, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_line_x1 = wx.TextCtrl(self.notebook_1_line, wx.ID_ANY, "100", style=wx.TE_PROCESS_ENTER)
self.text_line_x1.SetMinSize((40, -1))
sizer_5.Add(self.text_line_x1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_8 = wx.StaticText(self.notebook_1_line, wx.ID_ANY, "y1")
sizer_5.Add(label_8, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_line_y1 = wx.TextCtrl(self.notebook_1_line, wx.ID_ANY, "100", style=wx.TE_PROCESS_ENTER)
self.text_line_y1.SetMinSize((40, -1))
sizer_5.Add(self.text_line_y1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_9 = wx.StaticText(self.notebook_1_line, wx.ID_ANY, "pixels")
sizer_5.Add(label_9, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_5.Add((20, 20), 1, wx.ALIGN_CENTER_VERTICAL, 0)
self.button_draw_line = wx.Button(self.notebook_1_line, wx.ID_ANY, "Draw Line")
self.button_draw_line.SetDefault()
sizer_5.Add(self.button_draw_line, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.notebook_1_AddRectangle = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_AddRectangle, "Add Rectangle")
sizer_10 = wx.BoxSizer(wx.HORIZONTAL)
label_10 = wx.StaticText(self.notebook_1_AddRectangle, wx.ID_ANY, "corner x")
sizer_10.Add(label_10, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_rect_x = wx.TextCtrl(self.notebook_1_AddRectangle, wx.ID_ANY, "100", style=wx.TE_PROCESS_ENTER)
self.text_rect_x.SetMinSize((40, -1))
sizer_10.Add(self.text_rect_x, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_11 = wx.StaticText(self.notebook_1_AddRectangle, wx.ID_ANY, "y")
sizer_10.Add(label_11, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_rect_y = wx.TextCtrl(self.notebook_1_AddRectangle, wx.ID_ANY, "10", style=wx.TE_PROCESS_ENTER)
self.text_rect_y.SetMinSize((40, -1))
sizer_10.Add(self.text_rect_y, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_12 = wx.StaticText(self.notebook_1_AddRectangle, wx.ID_ANY, "width")
sizer_10.Add(label_12, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_rect_width = wx.TextCtrl(self.notebook_1_AddRectangle, wx.ID_ANY, "100", style=wx.TE_PROCESS_ENTER)
self.text_rect_width.SetMinSize((40, -1))
sizer_10.Add(self.text_rect_width, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_13 = wx.StaticText(self.notebook_1_AddRectangle, wx.ID_ANY, "height")
sizer_10.Add(label_13, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_rect_height = wx.TextCtrl(self.notebook_1_AddRectangle, wx.ID_ANY, "100", style=wx.TE_PROCESS_ENTER)
self.text_rect_height.SetMinSize((40, -1))
sizer_10.Add(self.text_rect_height, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_14 = wx.StaticText(self.notebook_1_AddRectangle, wx.ID_ANY, "pixels")
sizer_10.Add(label_14, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_16 = wx.StaticText(self.notebook_1_AddRectangle, wx.ID_ANY, "angle")
sizer_10.Add(label_16, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_rect_angle = wx.TextCtrl(self.notebook_1_AddRectangle, wx.ID_ANY, "0", style=wx.TE_PROCESS_ENTER)
self.text_rect_angle.SetMinSize((40, -1))
sizer_10.Add(self.text_rect_angle, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_17 = wx.StaticText(self.notebook_1_AddRectangle, wx.ID_ANY, u"°")
sizer_10.Add(label_17, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_10.Add((20, 20), 1, 0, 0)
self.button_draw_rect = wx.Button(self.notebook_1_AddRectangle, wx.ID_ANY, "Draw Rectangle")
self.button_draw_rect.SetDefault()
sizer_10.Add(self.button_draw_rect, 0, 0, 0)
self.notebook_1_circle = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_circle, "Add Circle")
sizer_11 = wx.BoxSizer(wx.HORIZONTAL)
label_26 = wx.StaticText(self.notebook_1_circle, wx.ID_ANY, "center x")
sizer_11.Add(label_26, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_circle_x = wx.TextCtrl(self.notebook_1_circle, wx.ID_ANY, "300", style=wx.TE_PROCESS_ENTER)
self.text_circle_x.SetMinSize((40, -1))
sizer_11.Add(self.text_circle_x, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_27 = wx.StaticText(self.notebook_1_circle, wx.ID_ANY, "y")
sizer_11.Add(label_27, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_circle_y = wx.TextCtrl(self.notebook_1_circle, wx.ID_ANY, "200", style=wx.TE_PROCESS_ENTER)
self.text_circle_y.SetMinSize((40, -1))
sizer_11.Add(self.text_circle_y, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_28 = wx.StaticText(self.notebook_1_circle, wx.ID_ANY, "radius")
sizer_11.Add(label_28, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.text_circle_radius = wx.TextCtrl(self.notebook_1_circle, wx.ID_ANY, "10", style=wx.TE_PROCESS_ENTER)
self.text_circle_radius.SetMinSize((40, -1))
sizer_11.Add(self.text_circle_radius, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_30 = wx.StaticText(self.notebook_1_circle, wx.ID_ANY, "pixels")
sizer_11.Add(label_30, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_11.Add((20, 20), 1, 0, 0)
self.button_draw_circle = wx.Button(self.notebook_1_circle, wx.ID_ANY, "Draw Circle")
self.button_draw_circle.SetDefault()
sizer_11.Add(self.button_draw_circle, 0, 0, 0)
sizer_18 = wx.StaticBoxSizer(wx.StaticBox(self.notebook_3_pane_1, wx.ID_ANY, "Colours etc"), wx.HORIZONTAL)
sizer_19.Add(sizer_18, 0, wx.EXPAND, 0)
label_20 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Line colour:")
sizer_18.Add(label_20, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.combo_box_colour = wx.ComboBox(self.notebook_3_pane_1, wx.ID_ANY, choices=["black", "red", "green", "blue", "yellow"], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.combo_box_colour.SetToolTip("not yet implemented")
self.combo_box_colour.SetSelection(0)
sizer_18.Add(self.combo_box_colour, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_18 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Line width:")
sizer_18.Add(label_18, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 15)
self.combo_box_line_width = wx.ComboBox(self.notebook_3_pane_1, wx.ID_ANY, choices=["0.1", "0.2", "0.5", "1", "2"], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.combo_box_line_width.SetMinSize((50, -1))
self.combo_box_line_width.SetToolTip("not yet implemented")
self.combo_box_line_width.SetSelection(3)
sizer_18.Add(self.combo_box_line_width, 0, wx.ALIGN_CENTER_VERTICAL, 0)
label_46 = wx.StaticText(self.notebook_3_pane_1, wx.ID_ANY, "Line style:")
sizer_18.Add(label_46, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 15)
self.choice_line_style = wx.Choice(self.notebook_3_pane_1, wx.ID_ANY, choices=[" - solid", " -- dashed", " -. dash dot", " : dotted"])
self.choice_line_style.SetSelection(0)
sizer_18.Add(self.choice_line_style, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.notebook_3_Shell = wx.Panel(self.notebook_3, wx.ID_ANY)
self.notebook_3.AddPage(self.notebook_3_Shell, "Shell")
sizer_7 = wx.StaticBoxSizer(wx.StaticBox(self.notebook_3_Shell, wx.ID_ANY, "Shell"), wx.VERTICAL)
self.shell = wx.py.shell.Shell(self.notebook_3_Shell, wx.ID_ANY, introText = "\nThis is the shell.\nHave a look at the variables 'app', 'frame', 'canvas' and 'shell'.\n")
# insert some variables into the shell's locals
self.shell.interp.locals["frame"] = self
self.shell.interp.locals["shell2"] = self.shell
self.shell.interp.locals["canvas"] = self.canvas
sizer_7.Add(self.shell, 2, wx.EXPAND, 0)
self.notebook_3_Shell.SetSizer(sizer_7)
self.notebook_1_circle.SetSizer(sizer_11)
self.notebook_1_AddRectangle.SetSizer(sizer_10)
self.notebook_1_line.SetSizer(sizer_5)
self.notebook_2_circle.SetSizer(sizer_17)
self.notebook_2_AddRectangle.SetSizer(sizer_16)
self.notebook_2_line.SetSizer(sizer_15)
self.notebook_3_pane_1.SetSizer(sizer_19)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
self.Bind(wx.EVT_CHOICE, self.on_choice_mouse_action, self.choice_mouse_action)
self.Bind(wx.EVT_BUTTON, lambda e: self.on_button_zoom_history("home"), self.button_zoom_hist_home)
self.Bind(wx.EVT_BUTTON, lambda e: self.on_button_zoom_history("back"), self.button_zoom_hist_back)
self.Bind(wx.EVT_BUTTON, lambda e: self.on_button_zoom_history("forward"), self.button_zoom_hist_forward)
self.Bind(wx.EVT_BUTTON, self.on_button_autoscale, self.button_autoscale)
self.Bind(wx.EVT_CHOICE, self.on_choice_canvas_size, self.choice_canvas_size)
self.Bind(wx.EVT_CHOICE, self.on_choice_subplots, self.choice_subplots)
self.Bind(wx.EVT_BUTTON, lambda e: self.on_button_clear("plots"), self.button_clear_plots)
self.Bind(wx.EVT_BUTTON, lambda e: self.on_button_clear("figures"), self.button_clear_figures)
self.Bind(wx.EVT_BUTTON, lambda e: self.on_button_clear("all"), self.button_clear_all)
self.Bind(wx.EVT_TOGGLEBUTTON, self.on_button_multicursor, self.button_multicursor)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot, self.text_function)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot, self.text_plot_xmin)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot, self.text_plot_xmax)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot, self.text_plot_xstep)
self.Bind(wx.EVT_CHOICE, self.on_choice_subplot, self.choice_subplot)
self.Bind(wx.EVT_BUTTON, self.on_button_plot, self.button_plot)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_line, self.text_plot_line_x0)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_line, self.text_plot_line_y0)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_line, self.text_plot_line_x1)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_line, self.text_plot_line_y1)
self.Bind(wx.EVT_BUTTON, self.on_button_plot_line, self.button_plot_line)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_rect, self.text_plot_rect_x)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_rect, self.text_plot_rect_y)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_rect, self.text_plot_rect_width)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_rect, self.text_plot_rect_height)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_rect, self.text_plot_rect_angle)
self.Bind(wx.EVT_BUTTON, self.on_button_plot_rect, self.button_plot_rect)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_circle, self.text_plot_circle_x)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_circle, self.text_plot_circle_y)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_plot_circle, self.text_plot_circle_radius)
self.Bind(wx.EVT_BUTTON, self.on_button_plot_circle, self.button_plot_circle)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_line, self.text_line_x0)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_line, self.text_line_y0)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_line, self.text_line_x1)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_line, self.text_line_y1)
self.Bind(wx.EVT_BUTTON, self.on_button_draw_line, self.button_draw_line)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_rect, self.text_rect_x)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_rect, self.text_rect_y)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_rect, self.text_rect_width)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_rect, self.text_rect_height)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_rect, self.text_rect_angle)
self.Bind(wx.EVT_BUTTON, self.on_button_draw_rect, self.button_draw_rect)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_circle, self.text_circle_x)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_circle, self.text_circle_y)
self.Bind(wx.EVT_TEXT_ENTER, self.on_button_draw_circle, self.text_circle_radius)
self.Bind(wx.EVT_BUTTON, self.on_button_draw_circle, self.button_draw_circle)
self.Bind(wx.EVT_COMBOBOX, self.on_combo_colour, self.combo_box_colour)
self.Bind(wx.EVT_TEXT, self.on_combo_colour, self.combo_box_colour)
self.Bind(wx.EVT_TEXT_ENTER, self.on_combo_colour, self.combo_box_colour)
self.Bind(wx.EVT_COMBOBOX, self.on_combo_line_width, self.combo_box_line_width)
self.Bind(wx.EVT_TEXT, self.on_combo_line_width, self.combo_box_line_width)
self.Bind(wx.EVT_TEXT_ENTER, self.on_combo_line_width, self.combo_box_line_width)
self.Bind(wx.EVT_CHOICE, self.on_choice_line_style, self.choice_line_style)
# end wxGlade
def on_file_save(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_file_save' not implemented!")
event.Skip()
def on_file_exit(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_file_exit' not implemented!")
event.Skip()
def on_choice_mouse_action(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_choice_mouse_action' not implemented!")
event.Skip()
def on_button_autoscale(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_autoscale' not implemented!")
event.Skip()
def on_choice_canvas_size(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_choice_canvas_size' not implemented!")
event.Skip()
def on_choice_subplots(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_choice_subplots' not implemented!")
event.Skip()
def on_button_multicursor(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_multicursor' not implemented!")
event.Skip()
def on_button_plot(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_plot' not implemented!")
event.Skip()
def on_choice_subplot(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_choice_subplot' not implemented!")
event.Skip()
def on_button_plot_line(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_plot_line' not implemented!")
event.Skip()
def on_button_plot_rect(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_plot_rect' not implemented!")
event.Skip()
def on_button_plot_circle(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_plot_circle' not implemented!")
event.Skip()
def on_button_draw_line(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_draw_line' not implemented!")
event.Skip()
def on_button_draw_rect(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_draw_rect' not implemented!")
event.Skip()
def on_button_draw_circle(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_draw_circle' not implemented!")
event.Skip()
def on_combo_colour(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_combo_colour' not implemented!")
event.Skip()
def on_combo_line_width(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_combo_line_width' not implemented!")
event.Skip()
def on_choice_line_style(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_choice_line_style' not implemented!")
event.Skip()
# end of class MyFrame
| 54.070552 | 178 | 0.683355 |
ace3d8823941a80869a497454eeaa416b481f319 | 1,125 | py | Python | ferenda/__init__.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 18 | 2015-03-12T17:42:44.000Z | 2021-12-27T10:32:22.000Z | ferenda/__init__.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 13 | 2016-01-27T10:19:07.000Z | 2021-12-13T20:24:36.000Z | ferenda/__init__.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 6 | 2016-11-28T15:41:29.000Z | 2022-01-08T11:16:48.000Z | # flake8: noqa
from .citationparser import CitationParser
from .uriformatter import URIFormatter
from .describer import Describer
from .pdfreader import PDFReader
from .pdfanalyze import PDFAnalyzer
from .textreader import TextReader
from .triplestore import TripleStore
from .fulltextindex import FulltextIndex
from .documententry import DocumentEntry
from .fsmparser import FSMParser
from .tocpageset import TocPageset
from .tocpage import TocPage
from .facet import Facet
from .feedset import Feedset
from .feed import Feed
from .resourceloader import ResourceLoader
from .transformer import Transformer
from .document import Document
from .documentstore import DocumentStore
from .requesthandler import RequestHandler
from .documentrepository import DocumentRepository
from .pdfdocumentrepository import PDFDocumentRepository
from .compositerepository import CompositeRepository, CompositeStore
from .resources import Resources
from .wordreader import WordReader
from .wsgiapp import WSGIApp
from .devel import Devel
# gets pulled into setup.py and docs/conf.py -- but appveyor.yml is separate
__version__ = "0.3.1.dev1"
| 36.290323 | 76 | 0.848889 |
ace3d8c199b4fe4cdee7b26f54f2367d5f2b5f4a | 837 | py | Python | setup.py | numberoverzero/snails | 0a9a0dc8724e8df2906cc6d0e9314789114c86e7 | [
"MIT"
] | null | null | null | setup.py | numberoverzero/snails | 0a9a0dc8724e8df2906cc6d0e9314789114c86e7 | [
"MIT"
] | null | null | null | setup.py | numberoverzero/snails | 0a9a0dc8724e8df2906cc6d0e9314789114c86e7 | [
"MIT"
] | null | null | null | import os
import pathlib
from setuptools import setup
HERE = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
VERSION = "VERSION-NOT-FOUND"
for line in (HERE / "snails.py").read_text().split("\n"):
if line.startswith("__version__"):
VERSION = eval(line.split("=")[-1])
README = (HERE / "README.rst").read_text()
REQUIREMENTS = [
"aiosmtpd"
]
if __name__ == "__main__":
setup(
name="snails",
version=VERSION,
description="minimal smtpd handler",
long_description=README,
long_description_content_type="text/x-rst",
author="Joe Cross",
author_email="joe.mcross@gmail.com",
url="https://github.com/numberoverzero/snails",
license="MIT",
platforms="any",
py_modules=["snails"],
install_requires=REQUIREMENTS,
)
| 27.9 | 63 | 0.634409 |
ace3d8f71cc1e731913b0b77616ad3762fbaa5f3 | 1,863 | py | Python | Teams_Zoom/google_meet_autologin.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | null | null | null | Teams_Zoom/google_meet_autologin.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | 11 | 2021-02-08T20:45:23.000Z | 2022-03-12T01:00:11.000Z | Teams_Zoom/google_meet_autologin.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
import pause
import pynput
from pynput.keyboard import Key, Controller
from datetime import datetime
#It is made to be used with an institute gmail (To use with Google Meet)
#You can program what time will join to the meet in the date section
#DATE
#####################YEAR#MONTH#DAY#HOUR#MINUTE###### DO NOT PUT ZERO BEFORE A NUMBER
# pause.until(datetime(2020, 3, 27, 11, 29))
# MAIL & PASSWORD (THE MAIL U WILL USE TO ENTER TO THE MEET)
usernameStr = 'MailHere'
passwordStr = 'PasswordHere'
url_meet = 'https://meet.google.com/MEET_ID_HERE'
browser = webdriver.Chrome()
browser.get(('https://accounts.google.com/ServiceLogin?'
'service=mail&continue=https://mail.google'
'.com/mail/#identifier'))
username = browser.find_element_by_id('identifierId')
username.send_keys(usernameStr)
nextButton = browser.find_element_by_id('identifierNext')
nextButton.click()
time.sleep(5)
keyboard = Controller()
#keyboard.type(passwordStr)
password = browser.find_element_by_xpath("//input[@class='whsOnd zHQkBf']")
password.send_keys(passwordStr)
#keyboard.type(passwordStr)
signInButton = browser.find_element_by_id('passwordNext')
signInButton.click()
time.sleep(3)
# MEET
browser.get(url_meet)
time.sleep(6)
######################################################################################### ↓↓↓↓↓↓↓↓↓↓↓↓ You have to put here the name of the button in your language, in my case it's in Spanish. :)
browser.find_element_by_xpath("//span[@class='NPEfkd RveJvd snByac' and contains(text(), 'Unirme ahora')]").click()
pause | 35.150943 | 195 | 0.721954 |
ace3d92bd70d843745bb7e1d8f7fb41ed0dad178 | 995 | py | Python | Testing/inference_runner.py | uit-hdl/rhd-codes | 7b62584fae75c6ff9ce84bc38230bce5da582bbf | [
"MIT"
] | 3 | 2019-06-30T13:02:07.000Z | 2021-06-14T08:52:23.000Z | Testing/inference_runner.py | uit-hdl/rhd-codes | 7b62584fae75c6ff9ce84bc38230bce5da582bbf | [
"MIT"
] | null | null | null | Testing/inference_runner.py | uit-hdl/rhd-codes | 7b62584fae75c6ff9ce84bc38230bce5da582bbf | [
"MIT"
] | 1 | 2019-11-03T19:43:49.000Z | 2019-11-03T19:43:49.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 10:02:36 2021
@author: bpe043
"""
from inference import main
import sqlite3
import tensorflow as tf
# Too many images to load them all, so we are doing it in batches
start = 0
end = 50000
increase = 50000
result = True
batch_index = 0
# Get cursor from the database
db = sqlite3.connect("<Path_to_the_full_census_database>")
cur = db.cursor()
# Exclusion set
training_db = sqlite3.connect("<Path_to_the_dugnad_database>")
exclusion_names = training_db.cursor().execute("SELECT Name FROM cells").fetchall()
exclusion_set = [x[0] for x in exclusion_names]
# Prediction model
prediction_model = tf.keras.models.load_model("<Path_to_saved_model>", compile = False)
# While we still have images to classify
while result == True:
result = main(batch_index, start, end, cur, prediction_model, exclusion_set)
start += increase
end += increase
batch_index += 1
| 22.613636 | 88 | 0.683417 |
ace3d947245ff720e6bbd945216ba0aa6d51459d | 1,381 | py | Python | provisioning/miniprov/setup.py | ajaykumarptl/cortx-hare | 6eada402c3f90f2f56743efb959ea308b9e171e5 | [
"Apache-2.0"
] | 16 | 2020-09-25T09:34:07.000Z | 2022-03-29T17:26:39.000Z | provisioning/miniprov/setup.py | ajaykumarptl/cortx-hare | 6eada402c3f90f2f56743efb959ea308b9e171e5 | [
"Apache-2.0"
] | 536 | 2020-09-24T14:59:10.000Z | 2022-03-31T15:44:52.000Z | provisioning/miniprov/setup.py | ajaykumarptl/cortx-hare | 6eada402c3f90f2f56743efb959ea308b9e171e5 | [
"Apache-2.0"
] | 108 | 2020-09-24T15:09:29.000Z | 2022-03-25T10:13:19.000Z | # Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import os
import os.path as P
import pkgconfig
from setuptools import find_packages, setup
def read(fname):
return open(P.join(P.dirname(__file__), fname)).read().rstrip('\n')
def get_mini_prov_version():
v = os.environ.get('HAX_VERSION')
if v:
return v
else:
return read('../../VERSION')
setup(name='hare_mp',
version=get_mini_prov_version(),
packages=find_packages(),
setup_requires=['flake8', 'mypy', 'pkgconfig'],
install_requires=['setuptools', 'dataclasses'],
package_data={'': ['*.dhall']},
entry_points={'console_scripts': ['hare_setup=hare_mp.main:main']})
| 30.688889 | 74 | 0.713975 |
ace3da33ad1bfc595c19fabdcd759c16b88c14ab | 475 | py | Python | mct_camera_tools/nodes/test_mjpeg_servers_info_srv.py | iorodeo/mct | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | [
"Apache-2.0"
] | null | null | null | mct_camera_tools/nodes/test_mjpeg_servers_info_srv.py | iorodeo/mct | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | [
"Apache-2.0"
] | null | null | null | mct_camera_tools/nodes/test_mjpeg_servers_info_srv.py | iorodeo/mct | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import roslib
roslib.load_manifest('mct_camera_tools')
import rospy
import json
from mct_utilities import json_tools
from mct_camera_tools import mjpeg_servers
from mct_msg_and_srv.srv import GetJSONString
if __name__ == '__main__':
mjpeg_info_dict = mjpeg_servers.mjpeg_servers_info_srv()
if mjpeg_info_dict is not None:
for k,v in mjpeg_info_dict.iteritems():
print(k,v)
else:
print('None')
| 25 | 60 | 0.757895 |
ace3da548979480126d6cba70c8e0dfaad242749 | 4,888 | py | Python | train_enwik8.py | ClashLuke/gpt-neox | 3291d0e6c867d9d328b96e8377f5b77c6f66c323 | [
"MIT"
] | null | null | null | train_enwik8.py | ClashLuke/gpt-neox | 3291d0e6c867d9d328b96e8377f5b77c6f66c323 | [
"MIT"
] | null | null | null | train_enwik8.py | ClashLuke/gpt-neox | 3291d0e6c867d9d328b96e8377f5b77c6f66c323 | [
"MIT"
] | null | null | null | import argparse
import json
import random
from collections import defaultdict
import wandb
import socket
import deepspeed
import torch
from torch.utils.data import DataLoader
from tqdm.auto import trange
from wandb import UsageError
from gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,
cycle, prepare_optimizer_parameters, decode_tokens, read_enwik8_data, is_main, prepare_data,
get_wandb_api_key)
def get_args():
    """Parse command-line options for the training script.

    DeepSpeed's own configuration arguments are appended to the parser, so the
    returned namespace also carries the launcher/engine settings.
    """
    arg_parser = argparse.ArgumentParser(description='GPTNeox Deepspeed Training Script')
    arg_parser.add_argument('--model', type=str, default="base_model")
    arg_parser.add_argument('--local_rank', type=int, default=-1,
                            help='local rank passed from distributed launcher')
    arg_parser.add_argument('--group_name', type=str, default=None, help='Group name used by wandb')
    # Include DeepSpeed configuration arguments.
    arg_parser = deepspeed.add_config_arguments(arg_parser)
    return arg_parser.parse_args()
def get_params(model):
    """Load the hyperparameter JSON for *model*.

    *model* may be a direct path to a .json file or a bare config name resolved
    under ./configs/. Missing keys read as None via the returned defaultdict.
    """
    if model.endswith(".json"):
        config_path = model
    else:
        config_path = f"./configs/{model}.json"
    with open(config_path) as config_file:
        loaded = json.load(config_file)
    return defaultdict(lambda: None, loaded)
# Parse CLI + DeepSpeed args, then load model hyperparameters (missing keys
# read as None thanks to the defaultdict returned by get_params).
train_args = get_args()
params = get_params(train_args.model)

# instantiate GPT-like decoder model
model = GPTNeoX(
    num_tokens=params["vocab_size"],
    dim=params["hidden_dim"],
    seq_len=params["seq_len"],
    depth=params["n_layers"],
    heads=params["n_heads"],
    dim_head=params["dim_head"]
)

## wandb
# Experiment tracking is enabled only when an API key is available locally.
use_wandb = get_wandb_api_key() is not None
if use_wandb:
    # only display system stats from one worker per machine
    wandb_settings = wandb.Settings() if is_main(train_args) else wandb.Settings(_disable_stats=True)
    name = f'{socket.gethostname()}-{train_args.local_rank}' if train_args.group_name else None
    try:
        wandb.init(project="neox_train_enwik8", group=train_args.group_name, name=name, save_code=True,
                   force=False,
                   entity=params.get('wandb', {}).get('team'), settings=wandb_settings)
    except UsageError as e:
        # wandb is optional: fall back to running without tracking.
        use_wandb = False
        print(e)
        print('Skipping wandb. Execute `wandb login` on local machine to enable.')
# Wrap the decoder for autoregressive (next-token) training.
model = AutoregressiveWrapper(model)
dset_params = params["dataset"]

deepspeed.init_distributed(dist_backend='nccl')
torch.distributed.barrier()  # barrier will force processes to stop until *all* processes have reached the barrier
# Only the main rank prepares the dataset; all other ranks wait at the barrier.
if is_main(train_args):
    prepare_data(dset_params["name"])
    torch.distributed.barrier()  # barrier will force processes to stop until *all* processes have reached the barrier
else:
    torch.distributed.barrier()

# prepare enwik8 data
data_train, data_val = read_enwik8_data(dset_params["path"])
train_dataset = TextSamplerDataset(data_train, params["seq_len"])
val_dataset = TextSamplerDataset(data_val, params["seq_len"])
val_loader = cycle(DataLoader(val_dataset, batch_size=params["batch_size"]))

# optimizer
optim = torch.optim.Adam(model.parameters(), lr=params["learning_rate"])

# training
ds_model_params = prepare_optimizer_parameters(model)

# deepspeed loader -- returns the wrapped engine, optimizer and a distributed
# training data loader built from train_dataset.
model_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,
                                                            model=model,
                                                            optimizer=optim,
                                                            model_parameters=ds_model_params,
                                                            training_data=train_dataset)

if use_wandb:
    wandb.config.update(params)
    wandb.watch(model_engine, log_freq=10, log=params.get('wandb', {}).get('watch_model'))
# NOTE(review): pbar is sized in epochs but update() is called once per batch,
# so the displayed progress runs ahead of the epoch count -- confirm intended.
pbar = trange(params["num_epochs"], mininterval=10., desc='Training Model', dynamic_ncols=True)
for _ in pbar:
    for i, data in enumerate(train_loader):
        model_engine.train()
        data = data.to(model_engine.local_rank)
        loss = model_engine(data)
        # backward/step go through the deepspeed engine wrapper, not torch directly
        model_engine.backward(loss)
        model_engine.step()

        pbar.set_description(f'Training Loss: {loss.item():.4f}')
        pbar.update()

        if use_wandb:
            wandb.log({'loss': loss.item()})

# Disabled validation/generation loop kept for reference as a string literal.
'''if is_main(train_args) and i % params["validate_every"] == 0:
model.eval()
with torch.no_grad():
val_data = next(val_loader).cuda()
loss = model(val_data)
pbar.write(f'Validation Loss: {loss.item()}')
if is_main(train_args) and i % params["generate_every"] == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
pbar.write(f"{prime} \n\n {'*' * 100}")
sample = model.generate(inp.cuda(), params["generate_length"])
output_str = decode_tokens(sample)
pbar.write(output_str)'''
| 37.030303 | 118 | 0.65937 |
ace3dba6e5660f831e9558221c5f37abcde90934 | 25,634 | py | Python | matterport-dl.py | Slyke/matterport-dl | f9f1a12036af4e3e2b29c30a8403dbae471cc7cc | [
"Unlicense"
] | 61 | 2021-06-12T18:18:32.000Z | 2022-03-29T17:26:31.000Z | matterport-dl.py | Slyke/matterport-dl | f9f1a12036af4e3e2b29c30a8403dbae471cc7cc | [
"Unlicense"
] | 41 | 2021-07-16T14:48:03.000Z | 2022-03-28T00:19:05.000Z | matterport-dl.py | Slyke/matterport-dl | f9f1a12036af4e3e2b29c30a8403dbae471cc7cc | [
"Unlicense"
] | 23 | 2021-07-06T10:52:37.000Z | 2022-03-22T13:20:58.000Z | #!/usr/bin/env python3
'''
Downloads virtual tours from matterport.
Usage is either running this program with the URL/pageid as an argument or calling the initiateDownload(URL/pageid) method.
'''
import requests
import json
import threading
import concurrent.futures
import urllib.request
from urllib.parse import urlparse
import pathlib
import re
import os
import shutil
import sys
import time
import logging
from tqdm import tqdm
from http.server import HTTPServer, SimpleHTTPRequestHandler
import decimal
# Weird hack: downloadFile() retries failed keyed URLs with every query string
# collected here (populated from the model's file endpoints by setAccessURLs).
accessurls = []
# Filename of the locally patched showcase.js served by the built-in web server.
SHOWCASE_INTERNAL_NAME = "showcase-internal.js"
def makeDirs(dirname):
    """Create *dirname* and any missing parents; no-op if it already exists."""
    target = pathlib.Path(dirname)
    target.mkdir(parents=True, exist_ok=True)
def getVariants():
    """Return every skybox tile filename: 4 zoom levels x (2^d)^2 grid x 6 faces.

    Ordering matches the original nested loops (zoom, x, y, face) because the
    download code pairs these names positionally with progress reporting.
    """
    depths = ["512", "1k", "2k", "4k"]
    return [
        f"{depths[depth]}_face{face}_{x}_{y}.jpg"
        for depth in range(4)
        for x in range(2 ** depth)
        for y in range(2 ** depth)
        for face in range(6)
    ]
def downloadUUID(accessurl, uuid):
    """Fetch the .dam mesh for *uuid* plus its high/low-res texture sets."""
    dam_name = f'{uuid}_50k.dam'
    downloadFile(accessurl.format(filename=dam_name), dam_name)
    # Keep a copy one directory up where the player expects to find it.
    shutil.copy(dam_name, f'..{os.path.sep}{dam_name}')
    cur_file = ""
    try:
        # Texture counts are not enumerable up front; request sequentially
        # until the first failure, which is treated as "past the last one".
        for i in range(1000):
            for quality in ("high", "low"):
                rel_name = f'{uuid}_50k_texture_jpg_{quality}/{uuid}_50k_{i:03d}.jpg'
                cur_file = accessurl.format(filename=rel_name)
                downloadFile(cur_file, rel_name)
    except Exception as ex:
        # Lazy but effective way to stop once the required files run out.
        logging.warning(f'Exception downloading file: {cur_file} of: {str(ex)}')
def downloadSweeps(accessurl, sweeps):
    """Download every skybox tile variant for every sweep, in parallel.

    A 32-worker thread pool is fed one job per (sweep, variant) pair while a
    tqdm bar tracks submissions (not completions).
    """
    with tqdm(total=(len(sweeps)*len(getVariants()))) as pbar:
        with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
            for sweep in sweeps:
                for variant in getVariants():
                    pbar.update(1)
                    executor.submit(downloadFile, accessurl.format(filename=f'tiles/{sweep}/{variant}') + "&imageopt=1", f'tiles/{sweep}/{variant}')
                    # Throttle submissions so the queue stays small.
                    # NOTE(review): _work_queue is a private ThreadPoolExecutor
                    # attribute -- works on CPython but is not a stable API.
                    while executor._work_queue.qsize() > 64:
                        time.sleep(0.01)
def downloadFileWithJSONPost(url, file, post_json_str, descriptor):
    """POST *post_json_str* (a JSON string) to *url* and store the response in *file*.

    *descriptor* is only used in log messages. Files that already exist are
    skipped, matching downloadFile()'s behavior.
    """
    global PROXY
    if "/" in file:
        makeDirs(os.path.dirname(file))
    if os.path.exists(file):
        # Bug fix: previously this branch only logged the skip but fell
        # through and re-downloaded anyway; now it actually returns early,
        # consistent with downloadFile().
        logging.debug(f'Skipping json post to url: {url} ({descriptor}) as already downloaded')
        return

    opener = getUrlOpener(PROXY)
    opener.addheaders.append(('Content-Type', 'application/json'))
    req = urllib.request.Request(url)
    # Headers on an opener are not applied when a Request is built by hand,
    # so copy them over explicitly.
    for header in opener.addheaders:
        req.add_header(header[0], header[1])
    body_bytes = bytes(post_json_str, "utf-8")
    req.add_header('Content-Length', len(body_bytes))
    resp = urllib.request.urlopen(req, body_bytes)
    with open(file, 'w', encoding="UTF-8") as the_file:
        the_file.write(resp.read().decode("UTF-8"))
    logging.debug(f'Successfully downloaded w/ JSON post to: {url} ({descriptor}) to: {file}')
def downloadFile(url, file, post_data=None):
    """Download *url* to local path *file*, with access-key fallback retries.

    Already-downloaded files are skipped. On HTTPError for keyed URLs
    ("?t="), the same path is retried with every query string in the global
    ``accessurls`` list before a bare Exception is raised.
    """
    global accessurls
    # Swap the URL's access key for the first-seen known key (no-op if none yet).
    url = GetOrReplaceKey(url, False)
    if "/" in file:
        makeDirs(os.path.dirname(file))
    if "?" in file:
        file = file.split('?')[0]
    if os.path.exists(file): #skip already downloaded files except idnex.html which is really json possibly wit hnewer access keys?
        logging.debug(f'Skipping url: {url} as already downloaded')
        return
    try:
        _filename,headers = urllib.request.urlretrieve(url, file,None,post_data)
        logging.debug(f'Successfully downloaded: {url} to: {file}')
        return
    except urllib.error.HTTPError as err:
        logging.warning(f'URL error dling {url} of will try alt: {str(err)}')

        # Try again but with different accessurls (very hacky!)
        if "?t=" in url:
            for accessurl in accessurls:
                url2=""
                try:
                    url2=f"{url.split('?')[0]}?{accessurl}"
                    urllib.request.urlretrieve(url2, file)
                    logging.debug(f'Successfully downloaded through alt: {url2} to: {file}')
                    return
                except urllib.error.HTTPError as err:
                    logging.warning(f'URL error alt method tried url {url2} dling of: {str(err)}')
                    pass
        logging.error(f'Failed to succeed for url {url}')
        raise Exception
    # Unreachable in practice: the try-branch returns and the except raises.
    logging.error(f'Failed2 to succeed for url {url}')#hopefully not getting here?
def downloadGraphModels(pageid):
    """Replay every captured GraphQL query, saving responses under api/mp/models/.

    *pageid* is accepted for call-site symmetry with the other download steps
    but is not used here (the bodies in GRAPH_DATA_REQ are pre-substituted).
    """
    global GRAPH_DATA_REQ
    makeDirs("api/mp/models")
    for operation_name, payload in GRAPH_DATA_REQ.items():
        downloadFileWithJSONPost(
            "https://my.matterport.com/api/mp/models/graph",
            f"api/mp/models/graph_{operation_name}.json",
            payload,
            operation_name,
        )
def downloadAssets(base):
    """Download the static showcase assets (js, css, fonts, locales, images) from *base*."""
    # Known showcase.js chunk ids; any extra ids found by the regex scan
    # below are appended (with a warning asking users to report them).
    js_files = ["browser-check",
                "30","47","66","79","134","136","143","164","250","251","316","321","356","371","376","383","386","423",
                "464","524","525","539","584","606","614","666","718","721","726","764","828","833","838","932","947"]
    language_codes = ["af", "sq", "ar-SA", "ar-IQ", "ar-EG", "ar-LY", "ar-DZ", "ar-MA", "ar-TN", "ar-OM",
                      "ar-YE", "ar-SY", "ar-JO", "ar-LB", "ar-KW", "ar-AE", "ar-BH", "ar-QA", "eu", "bg",
                      "be", "ca", "zh-TW", "zh-CN", "zh-HK", "zh-SG", "hr", "cs", "da", "nl", "nl-BE", "en",
                      "en-US", "en-EG", "en-AU", "en-GB", "en-CA", "en-NZ", "en-IE", "en-ZA", "en-JM",
                      "en-BZ", "en-TT", "et", "fo", "fa", "fi", "fr", "fr-BE", "fr-CA", "fr-CH", "fr-LU",
                      "gd", "gd-IE", "de", "de-CH", "de-AT", "de-LU", "de-LI", "el", "he", "hi", "hu",
                      "is", "id", "it", "it-CH", "ja", "ko", "lv", "lt", "mk", "mt", "no", "pl",
                      "pt-BR", "pt", "rm", "ro", "ro-MO", "ru", "ru-MI", "sz", "sr", "sk", "sl", "sb",
                      "es", "es-AR", "es-GT", "es-CR", "es-PA", "es-DO", "es-MX", "es-VE", "es-CO",
                      "es-PE", "es-EC", "es-CL", "es-UY", "es-PY", "es-BO", "es-SV", "es-HN", "es-NI",
                      "es-PR", "sx", "sv", "sv-FI", "th", "ts", "tn", "tr", "uk", "ur", "ve", "vi", "xh",
                      "ji", "zu"]
    font_files = ["ibm-plex-sans-100", "ibm-plex-sans-100italic", "ibm-plex-sans-200", "ibm-plex-sans-200italic", "ibm-plex-sans-300",
                  "ibm-plex-sans-300italic", "ibm-plex-sans-500", "ibm-plex-sans-500italic", "ibm-plex-sans-600", "ibm-plex-sans-600italic",
                  "ibm-plex-sans-700", "ibm-plex-sans-700italic", "ibm-plex-sans-italic", "ibm-plex-sans-regular", "mp-font", "roboto-100", "roboto-100italic",
                  "roboto-300", "roboto-300italic", "roboto-500", "roboto-500italic", "roboto-700", "roboto-700italic", "roboto-900", "roboto-900italic",
                  "roboto-italic", "roboto-regular"]
    #extension assumed to be .png unless it is .svg or .jpg, for anything else place it in assets
    image_files = ["360_placement_pin_maskH", "chrome", "Desktop-help-play-button.svg", "Desktop-help-spacebar", "edge", "escape", "exterior",
                   "exterior_hover", "firefox", "headset-cardboard", "headset-quest", "interior", "interior_hover", "matterport-logo-light.svg",
                   "mattertag-disc-128-free.v1", "mobile-help-play-button.svg", "nav_help_360", "nav_help_click_inside", "nav_help_gesture_drag",
                   "nav_help_gesture_drag_two_finger", "nav_help_gesture_pinch", "nav_help_gesture_position", "nav_help_gesture_position_two_finger",
                   "nav_help_gesture_tap", "nav_help_inside_key", "nav_help_keyboard_all", "nav_help_keyboard_left_right", "nav_help_keyboard_up_down",
                   "nav_help_mouse_click", "nav_help_mouse_drag_left", "nav_help_mouse_drag_right", "nav_help_mouse_position_left",
                   "nav_help_mouse_position_right", "nav_help_mouse_zoom", "nav_help_tap_inside", "nav_help_zoom_keys", "NoteColor", "NoteIcon", "pinAnchor",
                   "puck_256_red", "roboto-700-42_0", "safari", "scope.svg", "showcase-password-background.jpg", "surface_grid_planar_256", "tagbg", "tagmask",
                   "vert_arrows"]
    assets = ["css/showcase.css", "css/unsupported_browser.css", "cursors/grab.png", "cursors/grabbing.png", "cursors/zoom-in.png",
              "cursors/zoom-out.png", "locale/strings.json", "css/ws-blur.css"]
    # Fetch showcase.js first so we can scan it for dynamically loaded chunk ids.
    downloadFile(base + "js/showcase.js","js/showcase.js")
    with open(f"js/showcase.js", "r", encoding="UTF-8") as f:
        showcase_cont = f.read()
    #lets try to extract the js files it might be loading and make sure we know them
    js_extracted = re.findall(r'\.e\(([0-9]{2,3})\)', showcase_cont)
    js_extracted.sort()
    for js in js_extracted:
        if js not in js_files:
            print(f'JS FILE EXTRACTED BUT not known, please file a github issue and tell us to add: {js}.js, will download for you though:)')
            js_files.append(js)
    # Expand the name tables above into concrete relative asset paths.
    for image in image_files:
        if not image.endswith(".jpg") and not image.endswith(".svg"):
            image = image + ".png"
        assets.append("images/" + image)
    for js in js_files:
        assets.append("js/" + js + ".js")
    for f in font_files:
        assets.extend(["fonts/" + f + ".woff", "fonts/" + f + ".woff2"])
    for lc in language_codes:
        assets.append("locale/messages/strings_" + lc + ".json")
    # Download everything with a 16-worker pool; directory URLs map to index.html.
    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
        for asset in assets:
            local_file = asset
            if local_file.endswith('/'):
                local_file = local_file + "index.html"
            executor.submit(downloadFile, f"{base}{asset}", local_file)
def setAccessURLs(pageid):
    """Harvest access-key query strings from the downloaded files endpoints.

    Appends one key from files_type2 ("base.url") and one from files_type3
    (first template URL) to the global ``accessurls`` fallback list.
    """
    global accessurls
    with open(f"api/player/models/{pageid}/files_type2", "r", encoding="UTF-8") as f:
        accessurls.append(json.load(f)["base.url"].split("?")[-1])
    with open(f"api/player/models/{pageid}/files_type3", "r", encoding="UTF-8") as f:
        accessurls.append(json.load(f)["templates"][0].split("?")[-1])
def downloadInfo(pageid):
    """Download the per-model JSON/API endpoints needed to serve the tour locally."""
    assets = [f"api/v1/jsonstore/model/highlights/{pageid}", f"api/v1/jsonstore/model/Labels/{pageid}", f"api/v1/jsonstore/model/mattertags/{pageid}", f"api/v1/jsonstore/model/measurements/{pageid}", f"api/v1/player/models/{pageid}/thumb?width=1707&dpr=1.5&disable=upscale", f"api/v1/player/models/{pageid}/", f"api/v2/models/{pageid}/sweeps", "api/v2/users/current", f"api/player/models/{pageid}/files"]
    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
        for asset in assets:
            local_file = asset
            # Directory-style endpoints are stored as index.html.
            if local_file.endswith('/'):
                local_file = local_file + "index.html"
            executor.submit(downloadFile, f"https://my.matterport.com/{asset}", local_file )
    # Placeholder graph response; real per-operation responses come from
    # downloadGraphModels().
    makeDirs("api/mp/models")
    with open(f"api/mp/models/graph", "w", encoding="UTF-8") as f:
        f.write('{"data": "empty"}')
    # files?type=1..3 carry the access-key-bearing URL templates.
    for i in range(1,4):
        downloadFile(f"https://my.matterport.com/api/player/models/{pageid}/files?type={i}", f"api/player/models/{pageid}/files_type{i}")
    setAccessURLs(pageid)
def downloadPics(pageid):
    """Download every still image referenced by the model's player metadata."""
    with open(f"api/v1/player/models/{pageid}/index.html", "r", encoding="UTF-8") as f:
        modeldata = json.load(f)
    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as pool:
        for entry in modeldata["images"]:
            src = entry["src"]
            # Store under the URL's path, relative to the current directory.
            pool.submit(downloadFile, src, urlparse(src).path[1:])
def downloadModel(pageid, accessurl):
    """Download the 3-D mesh and all sweep tiles into models/<accessid>/.

    Note: chdir's into the model directory and stays there (the caller
    restores the working directory afterwards).
    """
    global ADVANCED_DOWNLOAD_ALL  # declared for parity with other steps; unused here
    with open(f"api/v1/player/models/{pageid}/index.html", "r", encoding="UTF-8") as f:
        modeldata = json.load(f)
    # The access id is the path segment before the literal {filename} placeholder.
    accessid = re.search(r'models/([a-z0-9-_./~]*)/\{filename\}', accessurl).group(1)
    model_dir = f"models/{accessid}"
    makeDirs(model_dir)
    os.chdir(model_dir)
    downloadUUID(accessurl, modeldata["job"]["uuid"])
    downloadSweeps(accessurl, modeldata["sweeps"])
# Patch showcase.js to fix expiration issue
def patchShowcase():
    """Rewrite js/showcase.js for offline use.

    Strips the access-key expiry check, rebases API URLs onto the local
    server's origin/path, and stubs the geoip lookup. Two variants are
    written: the internal copy (POSTs kept, served by our own server) and
    the plain copy (POSTs turned into GETs for static external hosting).
    """
    global SHOWCASE_INTERNAL_NAME
    with open("js/showcase.js","r",encoding="UTF-8") as f:
        j = f.read()
    # Remove the "&&(!e.expires||...*e.expires>Date.now())" guard entirely.
    j = re.sub(r"\&\&\(!e.expires\|\|.{1,10}\*e.expires>Date.now\(\)\)","",j)
    j = j.replace(f'"/api/mp/','`${window.location.pathname}`+"api/mp/')
    j = j.replace("${this.baseUrl}", "${window.location.origin}${window.location.pathname}")
    # Canned geoip response so no external request is made.
    j = j.replace('e.get("https://static.matterport.com/geoip/",{responseType:"json",priority:n.RequestPriority.LOW})', '{"country_code":"US","country_name":"united states","region":"CA","city":"los angeles"}')
    with open(f"js/{SHOWCASE_INTERNAL_NAME}","w",encoding="UTF-8") as f:
        f.write(j)
    j = j.replace(f'"POST"','"GET"') #no post requests for external hosted
    with open("js/showcase.js","w",encoding="UTF-8") as f:
        f.write(j)
def drange(x, y, jump):
    """Yield floats from *x* up to (but excluding) *y* in steps of *jump*.

    *jump* is coerced to Decimal so repeated addition stays exact.
    """
    step = decimal.Decimal(jump)
    while x < y:
        yield float(x)
        x += step
# Access key captured from the first keyed URL seen; reused for later URLs.
KNOWN_ACCESS_KEY = None

def GetOrReplaceKey(url, is_read_key):
    """Capture or substitute the "t=2-...-0" access key in *url*.

    With is_read_key=True the first key ever seen is remembered; otherwise the
    URL's key is replaced by the remembered one (if any). URLs without a key
    pass through unchanged.
    """
    global KNOWN_ACCESS_KEY
    match = re.search(r'(t=2\-.+?\-0)', url)
    if match is None:
        return url
    found_key = match.group(1)
    if is_read_key:
        if KNOWN_ACCESS_KEY is None:
            KNOWN_ACCESS_KEY = found_key
    elif KNOWN_ACCESS_KEY:
        url = url.replace(found_key, KNOWN_ACCESS_KEY)
    return url
def downloadPage(pageid):
    """Download a complete Matterport tour (page, assets, model data) into ./<pageid>/.

    Chdir's into the page directory for the duration of the run and restores
    it at the end. Raises when the CDN access URL cannot be located.
    """
    global ADVANCED_DOWNLOAD_ALL
    makeDirs(pageid)
    os.chdir(pageid)

    # Crop/width query combinations requested for dollhouse/floorplan tiles.
    ADV_CROP_FETCH = [
        {
            "start": "width=512&crop=1024,1024,",
            "increment": '0.5'
        },
        {
            "start": "crop=512,512,",
            "increment": '0.25'
        }
    ]
    logging.basicConfig(filename='run_report.log', encoding='utf-8', level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
    logging.debug(f'Started up a download run')
    page_root_dir = os.path.abspath('.')
    print("Downloading base page...")
    r = requests.get(f"https://my.matterport.com/show/?m={pageid}")
    r.encoding = "utf-8"
    staticbase = re.search(r'<base href="(https://static.matterport.com/.*?)">', r.text).group(1)
    match = re.search(r'"(https://cdn-\d*\.matterport\.com/models/[a-z0-9\-_/.]*/)([{}0-9a-z_/<>.]+)(\?t=.*?)"', r.text)
    if match:
        # Fix: the template must contain a literal "{filename}" placeholder --
        # it is filled in later via accessurl.format(filename=...) by
        # downloadUUID/downloadSweeps (hence the doubled braces in the f-string).
        accessurl = f'{match.group(1)}~/{{filename}}{match.group(3)}'
        print(accessurl)
    else:
        raise Exception("Can't find urls")

    # Fetch a fresh, commonly used access key and remember it for later URL rewrites.
    file_type_content = requests.get(f"https://my.matterport.com/api/player/models/{pageid}/files?type=3")
    GetOrReplaceKey(file_type_content.text, True)

    if ADVANCED_DOWNLOAD_ALL:
        print("Doing advanced download of dollhouse/floorplan data...")
        # Best-effort section: the prefetched model JSON layout changes over
        # time, so failures are swallowed deliberately (narrowed from bare
        # except to except Exception so Ctrl-C still works).
        try:
            match = re.search(r'window.MP_PREFETCHED_MODELDATA = (\{.+?\}\}\});', r.text)
            if match:
                preload_json = json.loads(match.group(1))
                base_node = preload_json["queries"]["GetModelPrefetch"]["data"]["model"]["assets"]
                # Download .dam mesh files.
                for mesh in base_node["meshes"]:
                    try:
                        # Not expecting the non-50k mesh to work, but might as well try.
                        downloadFile(mesh["url"], urlparse(mesh["url"]).path[1:])
                    except Exception:
                        pass
                for texture in base_node["textures"]:
                    try:  # on first exception assume we have all the ones needed
                        for i in range(1000):
                            full_text_url = texture["urlTemplate"].replace("<texture>", f'{i:03d}')
                            crop_to_do = []
                            if texture["quality"] == "high":
                                crop_to_do = ADV_CROP_FETCH
                            for crop in crop_to_do:
                                for x in list(drange(0, 1, decimal.Decimal(crop["increment"]))):
                                    for y in list(drange(0, 1, decimal.Decimal(crop["increment"]))):
                                        xs = f'{x}'
                                        ys = f'{y}'
                                        # The js client omits trailing ".0" on whole coordinates.
                                        if xs.endswith('.0'):
                                            xs = xs[:-2]
                                        if ys.endswith('.0'):
                                            ys = ys[:-2]
                                        complete_add = f'{crop["start"]}x{xs},y{ys}'
                                        complete_add_file = complete_add.replace("&", "_")
                                        try:
                                            downloadFile(full_text_url + "&" + complete_add, urlparse(full_text_url).path[1:] + complete_add_file + ".jpg")
                                        except Exception:
                                            pass
                            downloadFile(full_text_url, urlparse(full_text_url).path[1:])
                    except Exception:
                        pass
        except Exception:
            pass

    # Automatic redirect if GET param isn't correct
    injectedjs = 'if (window.location.search != "?m=' + pageid + '") { document.location.search = "?m=' + pageid + '"; }'
    # Rebase static/CDN/event URLs onto the local server and inject the redirect.
    content = r.text.replace(staticbase,".").replace('"https://cdn-1.matterport.com/','`${window.location.origin}${window.location.pathname}` + "').replace('"https://mp-app-prod.global.ssl.fastly.net/','`${window.location.origin}${window.location.pathname}` + "').replace("window.MP_PREFETCHED_MODELDATA",f"{injectedjs};window.MP_PREFETCHED_MODELDATA").replace('"https://events.matterport.com/', '`${window.location.origin}${window.location.pathname}` + "')
    # Push embedded expiry dates far into the future so the tour keeps working.
    content = re.sub(r"validUntil\":\s*\"20[\d]{2}-[\d]{2}-[\d]{2}T","validUntil\":\"2099-01-01T",content)
    with open("index.html", "w", encoding="UTF-8") as f:
        f.write(content)

    print("Downloading static assets...")
    # Always re-fetch showcase.js in case the patching logic changed; keep a backup.
    if os.path.exists("js/showcase.js"):
        os.replace("js/showcase.js", "js/showcase-bk.js")
    downloadAssets(staticbase)
    # Patch showcase.js to fix expiration issue and some other changes for local hosting
    patchShowcase()
    print("Downloading model info...")
    downloadInfo(pageid)
    print("Downloading images...")
    downloadPics(pageid)
    print("Downloading graph model data...")
    downloadGraphModels(pageid)
    print(f"Downloading model... access url: {accessurl}")
    downloadModel(pageid, accessurl)
    os.chdir(page_root_dir)
    # Create an empty file at the events endpoint path -- presumably so the
    # local server has something to serve for analytics posts; confirm.
    open("api/v1/event", 'a').close()
    print("Done!")
def initiateDownload(url):
    """Entry point: download the tour identified by *url* (full URL or bare page id)."""
    page_id = getPageId(url)
    downloadPage(page_id)
def getPageId(url):
    """Extract the model id from a showcase URL; a bare id passes through unchanged."""
    after_m = url.split("m=")[-1]
    return after_m.split("&")[0]
class OurSimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    """Static-file server with matterport-specific path rewrites.

    Serves the downloaded tour directory, redirecting showcase.js to the
    patched internal copy, missing locales to the default strings file, and
    cropped texture requests to their pre-downloaded variants. Graph POSTs
    are answered from the captured JSON responses.
    """

    def send_error(self, code, message=None):
        """Log 404s (helps spot assets the downloader missed) before delegating."""
        if code == 404:
            logging.warning(f'404 error: {self.path} may not be downloading everything right')
        SimpleHTTPRequestHandler.send_error(self, code, message)

    def do_GET(self):
        """Serve a file, rewriting self.path for known special cases first."""
        global SHOWCASE_INTERNAL_NAME
        redirect_msg = None
        orig_request = self.path
        # Serve the patched internal showcase.js (keeps POST handling) if present.
        if self.path.startswith("/js/showcase.js") and os.path.exists(f"js/{SHOWCASE_INTERNAL_NAME}"):
            redirect_msg = "using our internal showcase.js file"
            self.path = f"/js/{SHOWCASE_INTERNAL_NAME}"
        # Fall back to the default strings file for locales we never downloaded.
        if self.path.startswith("/locale/messages/strings_") and not os.path.exists(f".{self.path}"):
            redirect_msg = "original request was for a locale we do not have downloaded"
            self.path = "/locale/strings.json"
        raw_path, _, query = self.path.partition('?')
        # Cropped texture request: map width/crop query params onto the file
        # naming scheme used when the advanced download saved them.
        if "crop=" in query and raw_path.endswith(".jpg"):
            query_args = urllib.parse.parse_qs(query)
            crop_addition = query_args.get("crop", None)
            if crop_addition is not None:
                crop_addition = f'crop={crop_addition[0]}'
            else:
                crop_addition = ''

            width_addition = query_args.get("width", None)
            if width_addition is not None:
                width_addition = f'width={width_addition[0]}_'
            else:
                width_addition = ''
            test_path = raw_path + width_addition + crop_addition + ".jpg"
            if os.path.exists(f".{test_path}"):
                self.path = test_path
                redirect_msg = "dollhouse/floorplan texture request that we have downloaded, better than generic texture file"
        if redirect_msg is not None or orig_request != self.path:
            logging.info(f'Redirecting {orig_request} => {self.path} as {redirect_msg}')

        SimpleHTTPRequestHandler.do_GET(self)
        return;

    def do_POST(self):
        """Answer GraphQL POSTs from captured files; treat anything else as a GET."""
        post_msg = None
        try:
            if self.path == "/api/mp/models/graph":
                self.send_response(200)
                self.end_headers()
                content_len = int(self.headers.get('content-length'))
                post_body = self.rfile.read(content_len).decode('utf-8')
                json_body = json.loads(post_body)
                option_name = json_body["operationName"]
                if option_name in GRAPH_DATA_REQ:
                    file_path = f"api/mp/models/graph_{option_name}.json"
                    if os.path.exists(file_path):
                        with open(file_path, "r", encoding="UTF-8") as f:
                            self.wfile.write(f.read().encode('utf-8'))
                        post_msg = f"graph of operationName: {option_name} we are handling internally"
                        return;
                    else:
                        post_msg = f"graph for operationName: {option_name} we don't know how to handle, but likely could add support, returning empty instead"
                # Unknown or missing operation: reply with an empty payload.
                self.wfile.write(bytes('{"data": "empty"}', "utf-8"))
                return
        except Exception as error:
            post_msg = f"Error trying to handle a post request of: {str(error)} this should not happen"
            pass
        finally:
            if post_msg is not None:
                logging.info(f'Handling a post request on {self.path}: {post_msg}')

        self.do_GET()  # just treat the POST as a get otherwise:)

    def guess_type(self, path):
        """Append a UTF-8 charset to text/html responses."""
        res = SimpleHTTPRequestHandler.guess_type(self, path)
        if res == "text/html":
            return "text/html; charset=UTF-8"
        return res
# Runtime options, overwritten from the command line in the __main__ block below.
PROXY=False  # proxy address string ("host:port") or False for a direct connection
ADVANCED_DOWNLOAD_ALL=False  # when set, downloadPage also fetches dollhouse/floorplan crops
# Maps GraphQL operationName -> request body; filled by openDirReadGraphReqs().
GRAPH_DATA_REQ = {}
def openDirReadGraphReqs(path, pageId):
    """Load every captured GraphQL request body under *path* into GRAPH_DATA_REQ.

    Keys are the filenames minus their .json suffix; the placeholder model id
    in each body is substituted with *pageId*.
    """
    for dirpath, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            with open(os.path.join(dirpath, filename), "r", encoding="UTF-8") as fh:
                body = fh.read().replace("[MATTERPORT_MODEL_ID]", pageId)
            GRAPH_DATA_REQ[filename.replace(".json", "")] = body
def getUrlOpener(use_proxy):
    """Build a urllib opener with matterport-friendly headers, optionally proxied.

    *use_proxy* is either falsy (direct connection) or a "host:port" string
    applied to both http and https.
    """
    if not use_proxy:
        opener = urllib.request.build_opener()
    else:
        proxy_handler = urllib.request.ProxyHandler({'http': use_proxy, 'https': use_proxy})
        opener = urllib.request.build_opener(proxy_handler)
    opener.addheaders = [
        ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'),
        ('x-matterport-application-name', 'showcase'),
    ]
    return opener
def getCommandLineArg(name, has_value):
    """Consume the flag *name* from sys.argv (mutating it in place).

    Returns the following argument (also removed) when *has_value* is
    true, True when the bare flag was present, and False when the flag
    is absent."""
    argv = sys.argv
    for pos in range(1, len(argv)):
        if argv[pos] != name:
            continue
        argv.pop(pos)
        # After the pop above, the flag's value (if any) now occupies the
        # same index.
        return argv.pop(pos) if has_value else True
    return False
if __name__ == "__main__":
    # Flags must be consumed first so the positional-argument counting
    # below sees only the remaining arguments.
    ADVANCED_DOWNLOAD_ALL = getCommandLineArg("--advanced-download", False)
    PROXY = getCommandLineArg("--proxy", True)
    OUR_OPENER = getUrlOpener(PROXY)
    # Route every urllib.request call in this script through our opener.
    urllib.request.install_opener(OUR_OPENER)
    pageId = ""
    if len(sys.argv) > 1:
        pageId = getPageId(sys.argv[1])
    openDirReadGraphReqs("graph_posts",pageId)
    if len(sys.argv) == 2:
        # matterport-dl.py [url_or_page_id] -> download the model
        initiateDownload(pageId)
    elif len(sys.argv) == 4:
        # matterport-dl.py [url_or_page_id] [host] [port] -> serve a
        # previously downloaded model from its directory.
        os.chdir(getPageId(pageId))
        logging.basicConfig(filename='server.log', encoding='utf-8', level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
        logging.info("Server started up")
        print ("View in browser: http://" + sys.argv[2] + ":" + sys.argv[3])
        httpd = HTTPServer((sys.argv[2], int(sys.argv[3])), OurSimpleHTTPRequestHandler)
        httpd.serve_forever()
    else:
        print (f"Usage:\n\tFirst Download: matterport-dl.py [url_or_page_id]\n\tThen launch the server 'matterport-dl.py [url_or_page_id] 127.0.0.1 8080' and open http://127.0.0.1:8080 in a browser\n\t--proxy 127.0.0.1:1234 -- to have it use this web proxy\n\t--advanced-download -- Use this option to try and download the cropped files for dollhouse/floorplan support")
| 49.391137 | 457 | 0.611571 |
ace3dbc7eb7dbac59c5a4c69177558b4f03f7532 | 905 | py | Python | switch/manage.py | BitingCat/superm | f358d325bc8b087df508592d986a2d1e60d03959 | [
"Apache-2.0"
] | null | null | null | switch/manage.py | BitingCat/superm | f358d325bc8b087df508592d986a2d1e60d03959 | [
"Apache-2.0"
] | null | null | null | switch/manage.py | BitingCat/superm | f358d325bc8b087df508592d986a2d1e60d03959 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
# Repository root (parent of this file's directory); appended to sys.path
# so the project's ``config`` settings package is importable.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
if __name__ == "__main__":
    # Default settings module for Django's command-line entry point.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.dev_config")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django imports fine on its own: re-raise the original error.
        raise
    execute_from_command_line(sys.argv)
| 33.518519 | 77 | 0.656354 |
ace3dbd8aa82abf1a371595d4174cb11cc41eef2 | 1,031 | py | Python | pymager/persistence/_schemamigrator.py | samidalouche/pymager | 86a5f02163def40b7bbf81fd17e4c3e84bc5059c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2015-01-20T03:24:13.000Z | 2015-01-20T03:24:13.000Z | pymager/persistence/_schemamigrator.py | samidalouche/pymager | 86a5f02163def40b7bbf81fd17e4c3e84bc5059c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | pymager/persistence/_schemamigrator.py | samidalouche/pymager | 86a5f02163def40b7bbf81fd17e4c3e84bc5059c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2010 Sami Dalouche
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from zope.interface import Interface, implements
# zope.interface declaration: the method "bodies" below are documentation
# only; concrete implementations are registered elsewhere.
class SchemaMigrator(Interface):
    """ Manages the Schema, Metadata, and stores references to the Engine and Session Maker """
    def create_schema(self):
        """ Create the database metadata """
    def drop_all_tables(self):
        """ Drop all tables """
    def session_template(self):
        """ Creates a Spring JDBC-like template """
| 33.258065 | 95 | 0.691562 |
ace3dc00c7950d6481b75628961f38a7ece9d1db | 240 | py | Python | Python Pattern Programs/Numeric Patterns/Pattern 38.py | gitter-badger/Printing-Pattern-Programs | c3f5d2aee9f15f152b7e15f92defe1197b2b1406 | [
"MIT"
] | 61 | 2021-01-07T03:56:25.000Z | 2022-02-26T14:39:52.000Z | PythonPatternPrograms/NumericPatterns/Pattern 38.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 51 | 2020-12-25T17:06:26.000Z | 2021-05-07T12:52:56.000Z | PythonPatternPrograms/NumericPatterns/Pattern 38.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 13 | 2021-01-07T09:50:21.000Z | 2021-12-17T11:03:57.000Z | n = 5 # size=5
# Print an n-row digit pattern: each row shows its value twice, at a pair
# of mirrored columns that move inward as the value counts down from n.
left, right = 1, n * 2 - 1
for value in range(n, 0, -1):
    row = "".join(
        str(value) if col in (left, right) else " "
        for col in range(1, n * 2 + 1)
    )
    print(row)
    left += 1
    right -= 1
ace3dc257cebeb8bd3d4e9e5de8640e57b8e66bb | 2,078 | py | Python | src/dye/helpers.py | martsime/DSSCDB | 6631110c1bb477d45eab9c15324826958cd61ed6 | [
"AFL-3.0"
] | 7 | 2018-04-05T08:33:17.000Z | 2021-02-11T16:40:04.000Z | src/dye/helpers.py | Simensen-Hambro/DSSCDB | 6631110c1bb477d45eab9c15324826958cd61ed6 | [
"AFL-3.0"
] | 1 | 2017-04-21T20:24:03.000Z | 2017-04-21T20:24:03.000Z | src/dye/helpers.py | martsime/DSSCDB | 6631110c1bb477d45eab9c15324826958cd61ed6 | [
"AFL-3.0"
] | 2 | 2018-06-14T00:34:50.000Z | 2018-12-17T20:06:48.000Z | import re
import bibtexparser
import requests
from django.utils.timezone import datetime
import pybel
def get_DOI_metadata(doi):
    """Resolve *doi* via dx.doi.org content negotiation (bibtex) and return
    the article's metadata as a dict, or ``None`` when no bibtex entry was
    parsed from the response.

    The ``year`` field is converted to a datetime set to January 1st of
    that year.
    """
    url = 'http://dx.doi.org/' + doi
    headers = {'accept': 'application/x-bibtex', 'style': 'bibtex'}
    # A timeout keeps a stalled DOI resolver from hanging the caller
    # forever (requests waits indefinitely by default).
    response_bytes = requests.get(url, headers=headers, timeout=30).content
    response_string = response_bytes.decode("utf-8")
    bibtex_object = bibtexparser.loads(response_string)
    try:
        article = bibtex_object.entries[0]
    except IndexError:
        # Response was not parseable bibtex (e.g. unknown DOI).
        return None
    new_article_data = {
        'author': article.get('author'),
        'title': article.get('title'),
        'journal': article.get('journal'),
        'volume': article.get('volume'),
        'doi': article.get('doi'),
        'pages': article.get('pages'),
        'electronic_id': article.get('ID'),
        'issue_nr': article.get('number'),
        'keywords': article.get('keywords'),
        # NOTE(review): raises TypeError when the entry carries no 'year';
        # confirm whether real DOI responses can omit it.
        'year': datetime(year=int(article.get('year')), month=1, day=1),
    }
    return new_article_data
def to_decimal(string):
    """Best-effort shaping of *string* toward a number.

    Floats are rounded to 6 decimal places when their string form is long
    enough; strings containing characters other than digits, '.', ',' and
    '-' are returned unchanged; otherwise the first number-like substring
    is searched for (``None`` when nothing matches).

    NOTE(review): on the string path this returns the ``re.Match`` object
    itself rather than the matched text -- callers appear to depend on
    that, so the behavior is preserved here.
    """
    if not isinstance(string, float):
        # Raw-string literals fix the invalid escape sequences ("\-", "\d")
        # the old plain literals triggered warnings for; the redundant '^'
        # characters inside the class are kept so the accepted character
        # set is unchanged.
        illegal_characters = re.search(r'([^0-9^.^,^-])', string)
        if illegal_characters:
            return string
        else:
            rep = re.compile(r'(\-?\d*\.?\d+)')
            result = rep.search(string)
            if result:
                return result
            else:
                return None
    else:
        # Round only "long" float representations to 6 decimal places.
        if len(str(string)) >= 7:
            string = round(string, 6)
        return string
def locate_start_data(sheet):
    """
    Locate control tag start row

    Scans the first cell of every row for the "**%BEGIN%**" marker and
    returns the index two rows past it (the first data row), or None
    when the marker is absent.
    """
    for row_idx in range(sheet.nrows):
        first_cell = sheet.row_values(row_idx, 0, 1)
        if "**%BEGIN%**" in first_cell:
            return row_idx + 2
    return None
def generate_coordinates_babel(smiles):
    """Generate a 3D SDF block for *smiles* via Open Babel (pybel).

    Returns '' when the SMILES cannot be parsed or 3D generation fails.
    """
    try:
        pybelmol = pybel.readstring('smi', smiles)
        pybelmol.make3D()
        sdf_string = pybelmol.write("sdf")
        return sdf_string
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort fallback
        # without masking interpreter-level signals.
        return ''
| 26.641026 | 72 | 0.589028 |
ace3dc2b995f40e40601439b24b925ddd3acbeab | 13,689 | py | Python | aiohttp/payload.py | loven-doo/aiohttp | 01ef966b261bc6a8934b3c53c79c92f019b404a7 | [
"Apache-2.0"
] | 10,338 | 2017-03-11T23:38:15.000Z | 2022-03-30T03:39:09.000Z | aiohttp/payload.py | folt/aiohttp | 32870da27d6076adaff9d171e662bc728b5aed29 | [
"Apache-2.0"
] | 5,884 | 2017-03-11T17:35:43.000Z | 2022-03-31T10:19:46.000Z | aiohttp/payload.py | skhalymon/aiohttp | 73b5ca5b7cadc4e8ff1f2c88589979d404220ba2 | [
"Apache-2.0"
] | 1,868 | 2017-03-11T17:44:20.000Z | 2022-03-31T04:57:26.000Z | import asyncio
import enum
import io
import json
import mimetypes
import os
import warnings
from abc import ABC, abstractmethod
from itertools import chain
from typing import (
IO,
TYPE_CHECKING,
Any,
ByteString,
Dict,
Iterable,
Optional,
TextIO,
Tuple,
Type,
Union,
)
from multidict import CIMultiDict
from typing_extensions import Final
from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import (
_SENTINEL,
content_disposition_header,
guess_filename,
parse_mimetype,
sentinel,
)
from .streams import StreamReader
from .typedefs import JSONEncoder, _CIMultiDict
# Public names exported by ``from aiohttp.payload import *``.
__all__ = (
    "PAYLOAD_REGISTRY",
    "get_payload",
    "payload_type",
    "Payload",
    "BytesPayload",
    "StringPayload",
    "IOBasePayload",
    "BytesIOPayload",
    "BufferedReaderPayload",
    "TextIOPayload",
    "StringIOPayload",
    "JsonPayload",
    "AsyncIterablePayload",
)
# Threshold above which BytesPayload warns that a raw-bytes body may block
# the event loop while being sent.
TOO_LARGE_BYTES_BODY: Final[int] = 2 ** 20 # 1 MB
if TYPE_CHECKING: # pragma: no cover
    from typing import List
# NOTE: deliberately shadows the builtin LookupError inside this module;
# raised by PayloadRegistry.get when no factory matches a value.
class LookupError(Exception):
    pass
class Order(str, enum.Enum):
    """Registration priority bucket for PayloadRegistry factories."""
    normal = "normal"
    try_first = "try_first"
    try_last = "try_last"
def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload":
    """Wrap *data* into a Payload using the module-global registry."""
    return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
def register_payload(
    factory: Type["Payload"], type: Any, *, order: Order = Order.normal
) -> None:
    """Register *factory* for values of *type* in the global registry."""
    PAYLOAD_REGISTRY.register(factory, type, order=order)
class payload_type:
    """Class decorator: register the decorated Payload subclass for *type*."""
    def __init__(self, type: Any, *, order: Order = Order.normal) -> None:
        self.type = type
        self.order = order
    def __call__(self, factory: Type["Payload"]) -> Type["Payload"]:
        # Registers and returns the class unchanged so it can be decorated.
        register_payload(factory, self.type, order=self.order)
        return factory
# Type aliases used by the registry below.
PayloadType = Type["Payload"]
_PayloadRegistryItem = Tuple[PayloadType, Any]
class PayloadRegistry:
    """Payload registry.
    note: we need zope.interface for more efficient adapter search
    """
    def __init__(self) -> None:
        # Three priority buckets, scanned in this order by get().
        self._first = [] # type: List[_PayloadRegistryItem]
        self._normal = [] # type: List[_PayloadRegistryItem]
        self._last = [] # type: List[_PayloadRegistryItem]
    def get(
        self,
        data: Any,
        *args: Any,
        _CHAIN: "Type[chain[_PayloadRegistryItem]]" = chain,
        **kwargs: Any,
    ) -> "Payload":
        """Wrap *data* with the first registered factory whose type matches.

        ``_CHAIN`` is bound as a default argument so the lookup is a local
        instead of a global on every call; it is not meant to be passed by
        callers.  Raises the module-local ``LookupError`` when no factory
        matches.
        """
        # A value that is already a Payload passes through unchanged.
        if isinstance(data, Payload):
            return data
        for factory, type in _CHAIN(self._first, self._normal, self._last):
            if isinstance(data, type):
                return factory(data, *args, **kwargs)
        raise LookupError()
    def register(
        self, factory: PayloadType, type: Any, *, order: Order = Order.normal
    ) -> None:
        """File *factory* under the priority bucket selected by *order*."""
        if order is Order.try_first:
            self._first.append((factory, type))
        elif order is Order.normal:
            self._normal.append((factory, type))
        elif order is Order.try_last:
            self._last.append((factory, type))
        else:
            raise ValueError(f"Unsupported order {order!r}")
class Payload(ABC):
    """Abstract base for HTTP message bodies.

    Subclasses wrap a concrete value (bytes, string, file object, async
    iterable, ...) and implement ``write`` to stream it to an
    ``AbstractStreamWriter``.
    """
    _default_content_type = "application/octet-stream" # type: str
    _size = None # type: Optional[int]
    def __init__(
        self,
        value: Any,
        headers: Optional[
            Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]]
        ] = None,
        content_type: Union[None, str, _SENTINEL] = sentinel,
        filename: Optional[str] = None,
        encoding: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        self._encoding = encoding
        self._filename = filename
        self._headers = CIMultiDict() # type: _CIMultiDict
        self._value = value
        # Content-Type resolution: an explicit argument wins, then a guess
        # from the filename, then the class default.
        if content_type is not sentinel and content_type is not None:
            assert isinstance(content_type, str)
            self._headers[hdrs.CONTENT_TYPE] = content_type
        elif self._filename is not None:
            content_type = mimetypes.guess_type(self._filename)[0]
            if content_type is None:
                content_type = self._default_content_type
            self._headers[hdrs.CONTENT_TYPE] = content_type
        else:
            self._headers[hdrs.CONTENT_TYPE] = self._default_content_type
        # Caller-supplied headers are applied last and may override the
        # content type chosen above.
        self._headers.update(headers or {})
    @property
    def size(self) -> Optional[int]:
        """Size of the payload."""
        return self._size
    @property
    def filename(self) -> Optional[str]:
        """Filename of the payload."""
        return self._filename
    @property
    def headers(self) -> _CIMultiDict:
        """Custom item headers"""
        return self._headers
    @property
    def _binary_headers(self) -> bytes:
        # "Name: value\r\n" pairs joined together and terminated by a
        # blank line, UTF-8 encoded.
        return (
            "".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode(
                "utf-8"
            )
            + b"\r\n"
        )
    @property
    def encoding(self) -> Optional[str]:
        """Payload encoding"""
        return self._encoding
    @property
    def content_type(self) -> str:
        """Content type"""
        return self._headers[hdrs.CONTENT_TYPE]
    def set_content_disposition(
        self,
        disptype: str,
        quote_fields: bool = True,
        _charset: str = "utf-8",
        **params: Any,
    ) -> None:
        """Sets ``Content-Disposition`` header."""
        self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
            disptype, quote_fields=quote_fields, _charset=_charset, **params
        )
    @abstractmethod
    async def write(self, writer: AbstractStreamWriter) -> None:
        """Write payload.
        writer is an AbstractStreamWriter instance:
        """
class BytesPayload(Payload):
    """Payload for in-memory binary data (bytes/bytearray/memoryview)."""

    def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None:
        if not isinstance(value, (bytes, bytearray, memoryview)):
            raise TypeError(f"value argument must be byte-ish, not {type(value)!r}")

        kwargs.setdefault("content_type", "application/octet-stream")
        super().__init__(value, *args, **kwargs)

        # memoryview has no len() over bytes for multi-byte formats; use
        # nbytes there, plain len() otherwise.
        self._size = value.nbytes if isinstance(value, memoryview) else len(value)

        if self._size > TOO_LARGE_BYTES_BODY:
            warnings.warn(
                "Sending a large body directly with raw bytes might"
                " lock the event loop. You should probably pass an "
                "io.BytesIO object instead",
                ResourceWarning,
                source=self,
            )

    async def write(self, writer: AbstractStreamWriter) -> None:
        """Send the whole buffer in one call."""
        await writer.write(self._value)
class StringPayload(BytesPayload):
    """BytesPayload built from a text string.

    The effective encoding comes from, in order: the explicit *encoding*
    argument, the charset parameter of *content_type*, or UTF-8.
    """

    def __init__(
        self,
        value: str,
        *args: Any,
        encoding: Optional[str] = None,
        content_type: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        if encoding is not None:
            real_encoding = encoding
            if content_type is None:
                content_type = "text/plain; charset=%s" % encoding
        elif content_type is not None:
            # Honor an explicit charset in the supplied content type.
            real_encoding = parse_mimetype(content_type).parameters.get(
                "charset", "utf-8"
            )
        else:
            real_encoding = "utf-8"
            content_type = "text/plain; charset=utf-8"
        super().__init__(
            value.encode(real_encoding),
            *args,
            encoding=real_encoding,
            content_type=content_type,
            **kwargs,
        )
class StringIOPayload(StringPayload):
    """StringPayload over an in-memory text stream.

    Note: the stream is drained eagerly at construction time.
    """
    def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None:
        super().__init__(value.read(), *args, **kwargs)
class IOBasePayload(Payload):
    """Payload that streams from a blocking file-like object.

    The file is read in 64 KiB chunks inside the default executor so the
    event loop is never blocked, and it is always closed when writing
    finishes or fails.
    """

    _value: IO[Any]

    def __init__(
        self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any
    ) -> None:
        if "filename" not in kwargs:
            kwargs["filename"] = guess_filename(value)

        super().__init__(value, *args, **kwargs)

        # Only add a Content-Disposition when one was not already supplied.
        if self._filename is not None and disposition is not None:
            if hdrs.CONTENT_DISPOSITION not in self.headers:
                self.set_content_disposition(disposition, filename=self._filename)

    async def write(self, writer: AbstractStreamWriter) -> None:
        """Stream the file to *writer*, closing it afterwards."""
        # get_running_loop() is the correct call inside a coroutine; the
        # module-level get_event_loop() is deprecated for this use.
        loop = asyncio.get_running_loop()
        try:
            chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
            while chunk:
                await writer.write(chunk)
                chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
        finally:
            # close() may block too, so it also runs off-loop.
            await loop.run_in_executor(None, self._value.close)
class TextIOPayload(IOBasePayload):
    """Payload that streams from a blocking *text* file object, encoding
    each chunk before writing."""
    _value: TextIO
    def __init__(
        self,
        value: TextIO,
        *args: Any,
        encoding: Optional[str] = None,
        content_type: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        # Encoding resolution mirrors StringPayload: explicit encoding,
        # then the content type's charset parameter, then UTF-8.
        if encoding is None:
            if content_type is None:
                encoding = "utf-8"
                content_type = "text/plain; charset=utf-8"
            else:
                mimetype = parse_mimetype(content_type)
                encoding = mimetype.parameters.get("charset", "utf-8")
        else:
            if content_type is None:
                content_type = "text/plain; charset=%s" % encoding
        super().__init__(
            value,
            content_type=content_type,
            encoding=encoding,
            *args,
            **kwargs,
        )
    @property
    def size(self) -> Optional[int]:
        """Remaining size, or None when the stream has no file descriptor.

        NOTE(review): for text streams tell() returns an opaque cookie,
        not a byte offset, so this may be approximate -- confirm.
        """
        try:
            return os.fstat(self._value.fileno()).st_size - self._value.tell()
        except OSError:
            return None
    async def write(self, writer: AbstractStreamWriter) -> None:
        """Read 64 KiB text chunks off-loop, encode, and write them."""
        loop = asyncio.get_event_loop()
        try:
            chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
            while chunk:
                data = (
                    chunk.encode(encoding=self._encoding)
                    if self._encoding
                    else chunk.encode()
                )
                await writer.write(data)
                chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
        finally:
            # Always close the underlying file, even on error.
            await loop.run_in_executor(None, self._value.close)
class BytesIOPayload(IOBasePayload):
    """IOBasePayload over an in-memory binary buffer (``io.BytesIO``)."""

    @property
    def size(self) -> int:
        """Bytes remaining between the current position and EOF."""
        start = self._value.tell()
        end = self._value.seek(0, os.SEEK_END)
        self._value.seek(start)
        return end - start
class BufferedReaderPayload(IOBasePayload):
    """IOBasePayload whose size comes from the underlying file descriptor."""
    @property
    def size(self) -> Optional[int]:
        """Remaining bytes, or None when no real file descriptor exists."""
        try:
            return os.fstat(self._value.fileno()).st_size - self._value.tell()
        except OSError:
            # data.fileno() is not supported, e.g.
            # io.BufferedReader(io.BytesIO(b'data'))
            return None
class JsonPayload(BytesPayload):
    """BytesPayload that serializes a Python object to JSON bytes."""

    def __init__(
        self,
        value: Any,
        encoding: str = "utf-8",
        content_type: str = "application/json",
        dumps: JSONEncoder = json.dumps,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        # Serialize eagerly so errors surface at construction time.
        serialized = dumps(value).encode(encoding)
        super().__init__(
            serialized,
            *args,
            encoding=encoding,
            content_type=content_type,
            **kwargs,
        )
# At type-check time the aliases are parametrized with ``bytes``; at
# runtime they are the plain ABCs used for isinstance checks.
if TYPE_CHECKING: # pragma: no cover
    from typing import AsyncIterable, AsyncIterator
    _AsyncIterator = AsyncIterator[bytes]
    _AsyncIterable = AsyncIterable[bytes]
else:
    from collections.abc import AsyncIterable, AsyncIterator
    _AsyncIterator = AsyncIterator
    _AsyncIterable = AsyncIterable
class AsyncIterablePayload(Payload):
    """Payload streaming chunks from an async iterable of bytes.

    The iterator is created eagerly in ``__init__`` and dropped once it
    is exhausted, so the payload cannot be written twice.
    """

    _iter = None # type: Optional[_AsyncIterator]

    def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None:
        if not isinstance(value, AsyncIterable):
            # Fixed typo in the error message ("AsyncIterablebe").
            raise TypeError(
                "value argument must support "
                "collections.abc.AsyncIterable interface, "
                "got {!r}".format(type(value))
            )
        if "content_type" not in kwargs:
            kwargs["content_type"] = "application/octet-stream"
        super().__init__(value, *args, **kwargs)
        self._iter = value.__aiter__()

    async def write(self, writer: AbstractStreamWriter) -> None:
        """Drain the iterator into *writer*; no-op once exhausted."""
        if self._iter:
            try:
                # iter is not None check prevents rare cases
                # when the same iterable is used twice
                while True:
                    chunk = await self._iter.__anext__()
                    await writer.write(chunk)
            except StopAsyncIteration:
                self._iter = None
class StreamReaderPayload(AsyncIterablePayload):
    """AsyncIterablePayload fed from an aiohttp StreamReader via iter_any()."""
    def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None:
        super().__init__(value.iter_any(), *args, **kwargs)
# Module-global registry wired with the default factories.  Registration
# order matters within a bucket: the first matching isinstance() wins, so
# more specific types are registered before their bases (e.g. BytesIO
# before the generic IOBase fallback).
PAYLOAD_REGISTRY = PayloadRegistry()
PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
PAYLOAD_REGISTRY.register(StringPayload, str)
PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader)
# try_last gives more specialized async iterables (e.g.
# BodyPartReaderPayload) a chance to override this generic fallback.
PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last)
ace3dc46df41edeff3edea8e77464e7ee5dbe342 | 7,148 | py | Python | docs/conf.py | reoono/jupyter-fs | 8d4fedd3ac30104fbaec313281ccd613473de1bf | [
"Apache-2.0"
] | 6 | 2019-11-21T19:54:16.000Z | 2020-03-07T18:43:15.000Z | docs/conf.py | reoono/jupyter-fs | 8d4fedd3ac30104fbaec313281ccd613473de1bf | [
"Apache-2.0"
] | 7 | 2019-11-05T00:04:54.000Z | 2020-03-08T23:20:45.000Z | docs/conf.py | reoono/jupyter-fs | 8d4fedd3ac30104fbaec313281ccd613473de1bf | [
"Apache-2.0"
] | 1 | 2019-11-27T17:34:18.000Z | 2019-11-27T17:34:18.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# jupyterfs documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 12 22:07:11 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys
import os
import os.path
import subprocess
import sphinx_rtd_theme
from recommonmark.transform import AutoStructify
# Make the package importable from the repo root so autodoc can find it.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.coverage",
    "sphinx.ext.viewcode",
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "recommonmark",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "jupyterfs"
copyright = "2018, Tim Paine"
author = "Tim Paine"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = "0.4.0alpha0"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on language=None and expect a
# code such as "en" -- confirm before upgrading Sphinx.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    "**": [
        "relations.html",  # needs 'show_related': True theme option to display
        "searchbox.html",
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "jupyterfsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "jupyterfs.tex", "jupyterfs Documentation", "Tim Paine", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "jupyterfs", "jupyterfs Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "jupyterfs",
        "jupyterfs Documentation",
        author,
        "jupyterfs",
        "One line description of project.",
        "Miscellaneous",
    ),
]
def run_copyreadme(_):
    """Build docs/index.md from the repo README plus the api.md stub.

    Image ``src=`` and markdown ``](docs/...`` links in the README are
    rewritten so they resolve relative to the docs directory."""
    here = os.path.dirname(__file__)
    out = os.path.abspath(os.path.join(here, "index.md"))
    readme = os.path.abspath(os.path.join(here, "..", "README.md"))
    api = os.path.abspath(os.path.join(here, "api.md"))
    with open(out, "w") as dest:
        with open(readme, "r") as src:
            for line in src:
                if "src=" in line:
                    # <img> tags reference docs/... from the repo root
                    line = line.replace("docs/", "")
                elif "](docs/" in line:
                    # markdown links
                    line = line.replace("](docs/", "](")
                dest.write(line)
        dest.write("# API Documentation\n\n")
        with open(api, "r") as src:
            dest.write(src.read())
def run_apidoc(_):
    """Regenerate the API stub pages under docs/api with sphinx-apidoc."""
    base = os.path.dirname(__file__)
    out_dir = os.path.abspath(os.path.join(base, "api"))
    jupyterfs_dir = os.path.abspath(os.path.join(base, "..", "jupyterfs"))
    if hasattr(sys, "real_prefix"):  # Check to see if we are in a virtualenv
        # If we are, assemble the path manually
        cmd_path = os.path.abspath(os.path.join(sys.prefix, "bin", "sphinx-apidoc"))
    else:
        cmd_path = "sphinx-apidoc"
    subprocess.check_call(
        [cmd_path, "-E", "-M", "-o", out_dir, jupyterfs_dir, "--force"]
    )
def setup(app):
    """Sphinx extension hook: configure recommonmark and register the
    doc-generation steps that run when the builder initializes."""
    app.add_config_value(
        "recommonmark_config",
        {
            "auto_toc_tree_section": "Contents",
        },
        True,
    )
    app.add_transform(AutoStructify)
    # Regenerate index.md and the apidoc stubs on every build.
    app.connect("builder-inited", run_copyreadme)
    app.connect("builder-inited", run_apidoc)
| 31.213974 | 88 | 0.65305 |
ace3dc7c60e4629afeea4890841444bc3a564df7 | 6,672 | py | Python | api/app/game_package.py | gurland/SIGame_web | e8e5a90b21dd7bcd4af2306c4329aa6f7a8ee4f5 | [
"MIT"
] | 6 | 2018-07-26T04:05:49.000Z | 2020-06-10T23:39:56.000Z | api/app/game_package.py | gurland/SIGame_web | e8e5a90b21dd7bcd4af2306c4329aa6f7a8ee4f5 | [
"MIT"
] | 19 | 2018-06-13T12:45:57.000Z | 2020-04-25T14:17:42.000Z | api/app/game_package.py | gurland/SIGame_web | e8e5a90b21dd7bcd4af2306c4329aa6f7a8ee4f5 | [
"MIT"
] | 2 | 2018-06-18T18:49:46.000Z | 2020-04-19T06:31:09.000Z | from bs4 import BeautifulSoup
from uuid import uuid4
# Exception hierarchy for package parsing: both specific errors derive
# from ParsingError so callers can catch the whole family at once.
class ParsingError(Exception):
    pass
class AtomTypeError(ParsingError):
    pass
class ValidationError(ParsingError):
    pass
class GamePackage:
    """Parses an SIGame package XML document into a plain dict structure."""
    def __init__(self, xml):
        """Parse *xml* and pull the top-level <package> attributes.

        Raises ValidationError when the <package> root element is missing.
        """
        self.soup = BeautifulSoup(xml, 'lxml')
        self.root = self.soup.find('package')
        if not self.root:
            raise ValidationError('Root element is missing')
        self.authors = [author.text for author in self.root.find('info').find_all('author')]
        try:
            self.comments = self.root.find('comments').text
            self.sources = self.root.find('sources').text
        except AttributeError:
            # <comments>/<sources> absent; AttributeError comes from
            # .text on the None returned by find().
            self.comments = []
            self.sources = []
        self.name = self.root.get('name', '')
        self.date = self.root.get('date', '')
        self.version = self.root.get('version', '')
        # Generate an id when the package does not declare one.
        self.id = self.root.get('id', str(uuid4()))
        self.restriction = self.root.get('restriction', '')
    def to_dict(self):
        """Walk rounds -> themes -> questions and return the package as a
        nested dict (``_id`` keyed for storage)."""
        package_rounds = []
        package_dict = {'_id': self.id,
                        'name': self.name,
                        'date': self.date,
                        'version': self.version,
                        'restriction': self.restriction,
                        'comments': self.comments,
                        'authors': self.authors,
                        'sources': self.sources,
                        'rounds': package_rounds,
                        }
        for round_element in self.soup.find_all('round'):
            round_name = round_element['name']
            try:
                round_authors = [author.text for author in round_element.find('info').find_all('author')]
                round_comments = [comments.text for comments in round_element.find('info').find_all('comments')]
                round_sources = [sources.text for sources in round_element.find('info').find_all('sources')]
            except AttributeError:
                # No <info> element on this round.
                round_authors = []
                round_comments = []
                round_sources = []
            themes = []
            for theme in round_element.find_all('theme'):
                theme_name = theme['name']
                try:
                    theme_authors = [author.text for author in theme.find('info').find_all('author')]
                    theme_comments = [comments.text for comments in theme.find('info').find_all('comments')]
                    theme_sources = [sources.text for sources in theme.find('info').find_all('sources')]
                except AttributeError:
                    theme_authors = []
                    theme_comments = []
                    theme_sources = []
                questions = []
                for question in theme.find_all('question'):
                    q_price = question['price']
                    try:
                        q_authors = [author.text for author in question.find('info').find_all('author')]
                        q_comments = [comments.text for comments in question.find('info').find_all('comments')]
                        q_sources = [sources.text for sources in question.find('info').find_all('sources')]
                    except AttributeError:
                        q_authors = []
                        q_comments = []
                        q_sources = []
                    atoms, atom_answers = self.get_atoms(question)
                    # NOTE(review): raises AttributeError when a question
                    # lacks a <right> element -- confirm inputs always
                    # carry one.
                    answers = [answer.text for answer in question.find('right').find_all('answer')]
                    params = {}
                    try:
                        q_type = question.find('type')['name']
                        for param in question.find('type', attrs={'name': q_type}).find_all('param'):
                            params[param['name']] = param.text
                    except TypeError:
                        # No <type> element: treat as a simple question.
                        # NOTE(review): params becomes a list here but a
                        # dict above -- consumers must accept both.
                        q_type = 'simple'
                        params = []
                    questions.append({'info': {'price': q_price,
                                               'authors': q_authors,
                                               'comments': q_comments,
                                               'sources': q_sources,
                                               'type': {'name': q_type,
                                                        'params': params}},
                                      'atoms': atoms,
                                      'answers': answers,
                                      'atom_answers': atom_answers})
                themes.append({'name': theme_name,
                               'info': {'authors': theme_authors,
                                        'comments': theme_comments,
                                        'sources': theme_sources},
                               'questions': questions})
            package_rounds.append({'name': round_name,
                                   'info': {'authors': round_authors,
                                            'comments': round_comments,
                                            'sources': round_sources},
                                   'themes': themes})
        return package_dict
    def get_media_link(self, atom_type, atom_content):
        """Map '@'-prefixed atom content (a file bundled in the package)
        to its /media/<package id>/... URL; pass other content through."""
        if atom_content.startswith('@'):
            media_type_dirs = {'image': 'Images', 'voice': 'Audio', 'video': 'Video'}
            return f'/media/{self.id}/{media_type_dirs[atom_type]}/{atom_content[1:]}'
        else:
            return atom_content
    def get_atoms(self, question):
        """Split a question's <atom> elements into (atoms, atom_answers).

        Atoms before a 'marker' atom go to *atoms*; atoms after it go to
        *atom_answers*.  Media atom content is resolved via
        get_media_link.  Raises AtomTypeError on an unknown atom type.
        """
        atom_elements = question.find_all('atom')
        atoms = []
        atom_answers = []
        atom_after_marker = False
        for atom_element in atom_elements:
            try:
                atom_type = atom_element['type']
            except KeyError:
                # Untyped atoms default to plain text.
                atom_type = 'simple'
            atom_content = atom_element.text
            if atom_type == 'marker':
                atom_after_marker = True
                continue
            elif atom_type in ('simple', 'say'):
                if atom_after_marker:
                    atom_answers.append((atom_type, atom_content))
                else:
                    atoms.append((atom_type, atom_content))
            elif atom_type in ('image', 'video', 'voice'):
                if atom_after_marker:
                    atom_answers.append((atom_type, self.get_media_link(atom_type, atom_content)))
                else:
                    atoms.append((atom_type, self.get_media_link(atom_type, atom_content)))
            else:
                raise AtomTypeError('Unknown atom type')
        return atoms, atom_answers
| 39.952096 | 112 | 0.480216 |
ace3de049d479860ffbad4ba33bab6613dd54a22 | 761 | py | Python | tests/transmitter.py | ridi/django-shard-library | 405e1c213420e095f776d8d2969a147bb0793d9c | [
"BSD-3-Clause"
] | 17 | 2018-03-12T11:37:14.000Z | 2021-12-09T15:30:52.000Z | tests/transmitter.py | ridi/django-shard-library | 405e1c213420e095f776d8d2969a147bb0793d9c | [
"BSD-3-Clause"
] | 12 | 2018-03-12T10:39:39.000Z | 2018-08-21T03:26:09.000Z | tests/transmitter.py | ridi/django-shard-library | 405e1c213420e095f776d8d2969a147bb0793d9c | [
"BSD-3-Clause"
] | 3 | 2018-03-12T10:32:11.000Z | 2021-04-02T06:24:14.000Z | from typing import List, Tuple
from shard_static.models import BaseShardStaticModel, BaseStaticTransmitStatus
from shard_static.transmitter import Transmitter
from tests.models import StaticTransmitStatus
class TestTransmitter(Transmitter):
    """Transmitter wired to the StaticTransmitStatus test model."""
    # Status model the base Transmitter uses to persist the sync cursor.
    status_class = StaticTransmitStatus
    def collect(self, status: BaseStaticTransmitStatus) -> Tuple[int, List[BaseShardStaticModel]]:
        """Gather rows with id >= the status criterion.

        Returns ``(new_criterion, items)``; when nothing matches, the
        criterion is returned unchanged with an empty list.
        NOTE(review): *items* is the queryset itself despite the List
        annotation -- callers iterate it either way.
        """
        items = self.model_class.objects.filter(id__gte=status.criterion)
        if not items.exists():
            return status.criterion, []
        return items.last().id, items
    def transmit(self, items: List[BaseShardStaticModel]) -> List[BaseShardStaticModel]:
        """Bulk-insert *items* into the configured shard and return them."""
        self.model_class.objects.shard(shard=self.shard).bulk_create(objs=items)
        return items
| 34.590909 | 98 | 0.755585 |
ace3ded25868317e4d9ee3bd110dc66e40ddc1c0 | 921 | py | Python | login.py | evanscottgray/eggscrape | c578fd56153d6aef55f2ae3bf0f9029a57c26016 | [
"MIT"
] | null | null | null | login.py | evanscottgray/eggscrape | c578fd56153d6aef55f2ae3bf0f9029a57c26016 | [
"MIT"
] | null | null | null | login.py | evanscottgray/eggscrape | c578fd56153d6aef55f2ae3bf0f9029a57c26016 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
# Egghead.io account credentials; fill these in before running the script.
USER = ''
PASS = ''
def get_requests_client():
    """Log in to egghead.io and return an authenticated requests session.

    Fetches the sign-in page to extract the per-session CSRF token, then
    posts the module-level USER/PASS credentials together with that token.
    The returned session carries the authenticated cookies.

    Raises ValueError if the CSRF token field cannot be found on the
    sign-in page.
    """
    # requests.Session() is the documented spelling; requests.session()
    # is a legacy alias.
    client = requests.Session()

    LOGIN_URL = 'https://egghead.io/users/sign_in'
    login_page = client.get(LOGIN_URL)
    html = login_page.text

    # The sign-in form embeds a CSRF token that must be echoed back.
    # An explicit parser avoids BeautifulSoup's "no parser specified"
    # warning and parser-dependent behavior.
    input_tag = BeautifulSoup(html, 'html.parser').find(
        'input', attrs={'name': 'authenticity_token'})
    if input_tag is None:
        raise ValueError('Could not find authenticity_token on sign-in page')
    csrftoken = input_tag['value']

    data = {'user[email]': USER,
            'user[password]': PASS,
            'authenticity_token': csrftoken,
            'commit': 'Sign in'}

    # BUG FIX: the real HTTP header is spelled 'Referer' (sic, RFC 7231);
    # the original 'Referrer' header is ignored by servers.
    headers = {'Referer': 'https://egghead.io/users/sign_in',
               'Content-Type': 'application/x-www-form-urlencoded',
               'Accept': 'text/html',
               'Origin': 'https://egghead.io'}

    client.post(LOGIN_URL,
                data=data,
                headers=headers,
                cookies=login_page.cookies)

    return client
| 31.758621 | 78 | 0.565689 |
ace3df27ab053f114005e34b9fd77adc06dfa971 | 15,662 | py | Python | python/ccxt/async_support/base/exchange.py | wooner49/ccxt | d1ffbd2ef0b5745fd56dbe0bd61556304a84bf84 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | wooner49/ccxt | d1ffbd2ef0b5745fd56dbe0bd61556304a84bf84 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | wooner49/ccxt | d1ffbd2ef0b5745fd56dbe0bd61556304a84bf84 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.55.56'
# -----------------------------------------------------------------------------
import asyncio
import concurrent.futures
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttler import Throttler
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.errors import BadSymbol
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
    """Asynchronous ccxt exchange base built on aiohttp.

    Mirrors the synchronous ``ccxt.base.exchange.Exchange`` API, but every
    network-facing method is a coroutine.  This is vendored/generated
    library code; comments below document behavior without altering it.
    """

    def __init__(self, config={}):
        # NOTE(review): mutable default ``{}`` follows the base-class
        # convention; callers never mutate it here.
        if 'asyncio_loop' in config:
            self.asyncio_loop = config['asyncio_loop']
        # NOTE(review): assumes a falsy ``asyncio_loop`` attribute exists on
        # the class when not supplied via config -- confirm against the base.
        self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
        self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
        self.verify = config.get('verify', self.verify)
        # Only close the aiohttp session ourselves if we created it.
        self.own_session = 'session' not in config
        self.cafile = config.get('cafile', certifi.where())
        super(Exchange, self).__init__(config)
        self.throttle = None
        self.init_rest_rate_limiter()
        self.markets_loading = None
        self.reloading_markets = False

    def init_rest_rate_limiter(self):
        # Token-bucket limiter shared by all REST requests on this loop.
        self.throttle = Throttler(self.tokenBucket, self.asyncio_loop)

    def __del__(self):
        # Warn (rather than close) because __del__ cannot await close().
        if self.session is not None:
            self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")

    if sys.version_info >= (3, 5):
        # Async context-manager support: ``async with Exchange(...) as x:``.
        async def __aenter__(self):
            self.open()
            return self

        async def __aexit__(self, exc_type, exc, tb):
            await self.close()

    def open(self):
        # Lazily create the aiohttp session (only when we own it).
        if self.own_session and self.session is None:
            # Create our SSL context object with our CA cert file
            context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
            # Pass this SSL context to aiohttp and create a TCPConnector
            connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop, enable_cleanup_closed=True)
            self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)

    async def close(self):
        # Release the aiohttp session if it is ours; always drop the reference.
        if self.session is not None:
            if self.own_session:
                await self.session.close()
            self.session = None

    async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
        """A better wrapper over request for deferred signing"""
        if self.enableRateLimit:
            cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
            # insert cost into here...
            await self.throttle(cost)
        self.lastRestRequestTimestamp = self.milliseconds()
        # Signing is deferred until after the throttle wait so the
        # timestamp/nonce in the signature is fresh.
        request = self.sign(path, api, method, params, headers, body)
        return await self.fetch(request['url'], request['method'], request['headers'], request['body'])

    async def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data"""
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            self.log("\nRequest:", method, url, headers, body)
        self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
        request_body = body
        # aiohttp expects bytes for the request payload.
        encoded_body = body.encode() if body else None
        self.open()
        session_method = getattr(self.session, method.lower())
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            async with session_method(yarl.URL(url, encoded=True),
                                      data=encoded_body,
                                      headers=request_headers,
                                      timeout=(self.timeout / 1000),
                                      proxy=self.aiohttp_proxy) as response:
                http_response = await response.text()
                # CIMultiDictProxy
                # Flatten the multidict; duplicate header names are joined
                # with ', ' per the usual HTTP convention.
                raw_headers = response.headers
                headers = {}
                for header in raw_headers:
                    if header in headers:
                        headers[header] = headers[header] + ', ' + raw_headers[header]
                    else:
                        headers[header] = raw_headers[header]
                http_status_code = response.status
                http_status_text = response.reason
                http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, http_response, request_headers, request_body)
                json_response = self.parse_json(http_response)
                if self.enableLastHttpResponse:
                    self.last_http_response = http_response
                if self.enableLastResponseHeaders:
                    self.last_response_headers = headers
                if self.enableLastJsonResponse:
                    self.last_json_response = json_response
                if self.verbose:
                    self.log("\nResponse:", method, url, http_status_code, headers, http_response)
                self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
        # Map transport-level failures onto ccxt's exception hierarchy.
        except socket.gaierror as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeNotAvailable(details) from e
        except (concurrent.futures.TimeoutError, asyncio.TimeoutError) as e:
            details = ' '.join([self.id, method, url])
            raise RequestTimeout(details) from e
        except aiohttp.ClientConnectionError as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeNotAvailable(details) from e
        except aiohttp.ClientError as e:  # base exception class
            details = ' '.join([self.id, method, url])
            raise ExchangeError(details) from e
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
        if json_response is not None:
            return json_response
        if self.is_text_response(headers):
            return http_response
        # NOTE(review): ``response`` here is the variable bound in the
        # ``async with`` above, whose connection is already released; this
        # fall-through is presumably only reached for non-text, non-JSON
        # payloads -- confirm before relying on it.
        return response.content

    async def load_markets_helper(self, reload=False, params={}):
        """Fetch markets (and currencies when supported) and index them."""
        if not reload:
            if self.markets:
                if not self.markets_by_id:
                    return self.set_markets(self.markets)
                return self.markets
        currencies = None
        if self.has['fetchCurrencies']:
            currencies = await self.fetch_currencies()
        markets = await self.fetch_markets(params)
        return self.set_markets(markets, currencies)

    async def load_markets(self, reload=False, params={}):
        """Concurrency-safe wrapper around load_markets_helper.

        Multiple concurrent callers share a single in-flight task.
        """
        if (reload and not self.reloading_markets) or not self.markets_loading:
            self.reloading_markets = True
            coroutine = self.load_markets_helper(reload, params)
            # coroutines can only be awaited once so we wrap it in a task
            self.markets_loading = asyncio.ensure_future(coroutine)
        try:
            result = await self.markets_loading
        except Exception as e:
            # Reset state so a later call can retry the load.
            self.reloading_markets = False
            self.markets_loading = None
            raise e
        self.reloading_markets = False
        return result

    async def fetch_fees(self):
        """Fetch trading and funding fees where the exchange supports them."""
        trading = {}
        funding = {}
        if self.has['fetchTradingFees']:
            trading = await self.fetch_trading_fees()
        if self.has['fetchFundingFees']:
            funding = await self.fetch_funding_fees()
        return {
            'trading': trading,
            'funding': funding,
        }

    async def load_fees(self, reload=False):
        """Fetch fees once and cache them on the instance unless reloading."""
        if not reload:
            if self.loaded_fees != Exchange.loaded_fees:
                return self.loaded_fees
        self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
        return self.loaded_fees

    async def fetch_markets(self, params={}):
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.to_array(self.markets)

    async def fetch_currencies(self, params={}):
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.currencies

    async def fetch_status(self, params={}):
        """Return exchange status, refreshing 'updated' when fetchTime exists."""
        if self.has['fetchTime']:
            updated = await self.fetch_time(params)
            self.status['updated'] = updated
        return self.status

    async def fetch_order_status(self, id, symbol=None, params={}):
        """Return just the 'status' field of a fetched order."""
        order = await self.fetch_order(id, symbol, params)
        return order['status']

    async def fetch_partial_balance(self, part, params={}):
        """Return one section ('free', 'used' or 'total') of the balance."""
        balance = await self.fetch_balance(params)
        return balance[part]

    async def fetch_l2_order_book(self, symbol, limit=None, params={}):
        """Return the order book with price levels aggregated and sorted."""
        orderbook = await self.fetch_order_book(symbol, limit, params)
        return self.extend(orderbook, {
            'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
            'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
        })

    async def perform_order_book_request(self, market, limit=None, params={}):
        # Hook for concrete exchange subclasses.
        raise NotSupported(self.id + ' performOrderBookRequest not supported yet')

    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch and parse the raw order book for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        orderbook = await self.perform_order_book_request(market, limit, params)
        return self.parse_order_book(orderbook, market, limit, params)

    async def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Build OHLCVC candles from public trades when no native endpoint exists."""
        if not self.has['fetchTrades']:
            raise NotSupported('fetch_ohlcv() not implemented yet')
        await self.load_markets()
        trades = await self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcvc(trades, timeframe, since, limit)

    async def fetchOHLCVC(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # camelCase alias for fetch_ohlcvc.
        return await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)

    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # Drop the trailing count element from each OHLCVC candle.
        ohlcvs = await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
        return [ohlcv[0:-1] for ohlcv in ohlcvs]

    async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # camelCase alias for fetch_ohlcv.
        return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)

    async def fetch_full_tickers(self, symbols=None, params={}):
        # Alias kept for backward compatibility.
        return await self.fetch_tickers(symbols, params)

    async def edit_order(self, id, symbol, *args):
        """Emulate order editing as cancel-then-create."""
        if not self.enableRateLimit:
            raise ExchangeError('updateOrder() requires enableRateLimit = true')
        await self.cancel_order(id, symbol)
        return await self.create_order(symbol, *args)

    # -- Abstract endpoints: concrete exchange subclasses override these. --

    async def fetch_balance(self, params={}):
        raise NotSupported('fetch_balance() not supported yet')

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        raise NotSupported('create_order() not supported yet')

    async def cancel_order(self, id, symbol=None, params={}):
        raise NotSupported('cancel_order() not supported yet')

    async def fetch_trading_fees(self, params={}):
        raise NotSupported('fetch_trading_fees() not supported yet')

    async def fetch_trading_fee(self, symbol, params={}):
        """Return trading fees; delegates to fetch_trading_fees when available."""
        if not self.has['fetchTradingFees']:
            raise NotSupported('fetch_trading_fee() not supported yet')
        return await self.fetch_trading_fees(params)

    async def load_trading_limits(self, symbols=None, reload=False, params={}):
        """Merge per-symbol trading limits into self.markets (cached)."""
        if self.has['fetchTradingLimits']:
            if reload or not('limitsLoaded' in list(self.options.keys())):
                response = await self.fetch_trading_limits(symbols)
                for i in range(0, len(symbols)):
                    symbol = symbols[i]
                    self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
                self.options['limitsLoaded'] = self.milliseconds()
        return self.markets

    async def load_accounts(self, reload=False, params={}):
        """Fetch and cache sub-accounts; also index them by id."""
        if reload:
            self.accounts = await self.fetch_accounts(params)
        else:
            if self.accounts:
                return self.accounts
            else:
                self.accounts = await self.fetch_accounts(params)
        self.accountsById = self.index_by(self.accounts, 'id')
        return self.accounts

    async def fetch_ticker(self, symbol, params={}):
        """Emulate single-ticker fetch via fetch_tickers when supported."""
        if self.has['fetchTickers']:
            tickers = await self.fetch_tickers([symbol], params)
            ticker = self.safe_value(tickers, symbol)
            if ticker is None:
                raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
            else:
                return ticker
        else:
            raise NotSupported(self.id + ' fetchTicker not supported yet')

    async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        raise NotSupported('fetch_transactions() is not supported yet')

    async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        raise NotSupported('fetch_deposits() is not supported yet')

    async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        raise NotSupported('fetch_withdrawals() is not supported yet')

    async def fetch_deposit_address(self, code, params={}):
        """Emulate single-address fetch via fetch_deposit_addresses when supported."""
        if self.has['fetchDepositAddresses']:
            deposit_addresses = await self.fetch_deposit_addresses([code], params)
            deposit_address = self.safe_value(deposit_addresses, code)
            if deposit_address is None:
                raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
            else:
                return deposit_address
        else:
            raise NotSupported(self.id + ' fetchDepositAddress not supported yet')

    async def sleep(self, milliseconds):
        # ccxt APIs express delays in milliseconds; asyncio wants seconds.
        return await asyncio.sleep(milliseconds / 1000)
| 43.871148 | 355 | 0.618248 |
ace3df91f7bc0ddbe5098aa408fcb9fe177d1173 | 13,952 | py | Python | Scripts/TypeRig GUI/Utils/G-InsertElement.py | hwk1984/TypeRig | 34eda9b3eaa57d132a0a9b62e699d6e862d18382 | [
"BSD-3-Clause"
] | 1 | 2020-05-31T08:50:09.000Z | 2020-05-31T08:50:09.000Z | Scripts/TypeRig GUI/Utils/G-InsertElement.py | hwk1984/TypeRig | 34eda9b3eaa57d132a0a9b62e699d6e862d18382 | [
"BSD-3-Clause"
] | null | null | null | Scripts/TypeRig GUI/Utils/G-InsertElement.py | hwk1984/TypeRig | 34eda9b3eaa57d132a0a9b62e699d6e862d18382 | [
"BSD-3-Clause"
] | null | null | null | #FLM: TR: Insert elements (TypeRig)
# ----------------------------------------
# (C) Vassil Kateliev, 2019 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#-----------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -----------------
import os
import fontlab as fl6
import fontgate as fgt
from PythonQt import QtCore, QtGui
from typerig.proxy import pFont, pGlyph
from typerig.brain import Coord
# - Init --------------------------------
app_version = '1.2'
app_name = 'Insert elements'

# -- Syntax
# Tokens making up the insertion-expression grammar parsed by
# dlg_glyphComposer.process().
syn_comment = '#'       # line comment marker
syn_insert = '->'       # separates source elements from destination glyphs
syn_label = '!'         # node-label / special-tag prefix
syn_anchor = '$'        # anchor-name prefix
syn_pos = '@'           # attaches a position to an element
syn_transform = '^'     # attaches an (x,y) coordinate correction
syn_exprbegin = '('     # reserved: expression delimiters
syn_exprend = ')'
syn_coordsep =','       # separates x,y in coordinate pairs
syn_namesep = ' '       # separates names in a list
# Special tags built from the label prefix:
syn_currglyph = syn_label + 'glyph'     # the currently active glyph
syn_currnode = syn_label + 'node'       # the currently selected node
syn_bboxTL = syn_label + 'TL'           # bounding-box corner tags
syn_bboxTR = syn_label + 'TR'
syn_bboxBL = syn_label + 'BL'
syn_bboxBR = syn_label + 'BR'
syn_passlayer = syn_label + 'passlayer' # sentinel: command applies to all layers
# -- Strings
str_help = '''Examples:
_element_ -> A B C D
Inserts _element_ into glyphs with names /A, /B, /C, /D at the layer specified using the layer selector.
_element_ -> !glyph
Inserts _element_ into current ACTIVE GLYPH at layer selected.
_element_@-30,20 -> A
Inserts _element_ at COORDINATES -30,20 into glyph A at layer selected.
_element_@!foo -> A
Inserts _element_ at coordinates of node with TAG 'foo' at glyph /A
_element_@$bar -> A
Inserts _element_ at coordinates of ANCHOR named 'bar' at glyph /A
_element_@!node -> !glyph
Inserts _element_ at coordinates of the currently SELECTED NODE of current active glyph.
_element_@!node^40,0 -> !glyph
Inserts _element_ at coordinates with CORRECTION 40,0 of the currently selected node of current active glyph.
_element_@!TL -> A
Inserts _element_ at Top Left BBOX coordinates of the of glyph /A
Valid positional tags are !BL, !BR, !TL, !TR;
_element_!TL@!TL^-20,10 -> A
Inserts _element_ by matching the (TL) BBOX coordinates of _element_ to the -20,10 adjusted (TL) BBOX coordinates of the of glyph /A
e1@!foo e2@!baz e3@!bar -> H I K
Inserts elements e1, e2, e3 into every glyph (/H, /I, /K) at specified node tags
layer1 - > e1!BL@!foo e2!TL@!baz^-20,0 -> H N
layer2 - > e1!BL@!foo e2!TL@!baz^-50,0 -> H N
Inserts elements e1, e2, into every glyph (/H, /N) at specified node tags with correction different for every layer set explicitly.
'''
# - Classes --------------------------------
class TrPlainTextEdit(QtGui.QPlainTextEdit):
    '''Plain-text editor whose context menu is extended with the composer's
    syntax symbols, positional tags and helper actions.'''

    # (label, text-to-insert) pairs; None marks a menu separator.
    _menu_spec = [
        None,
        ('Symbol: Insert', syn_insert),
        ('Symbol: Attachment', syn_pos),
        ('Symbol: Node Label', syn_label),
        ('Symbol: Anchor Label', syn_anchor),
        ('Symbol: Transform', syn_transform),
        ('Symbol: Comment', syn_comment),
        None,
        ('Tag: Current Glyph', syn_currglyph),
        ('Tag: Current Node', syn_currnode),
        None,
        ('Tag: BBoX Bottom Left', syn_bboxBL),
        ('Tag: BBoX Bottom Right', syn_bboxBR),
        ('Tag: BBoX Top Left', syn_bboxTL),
        ('Tag: BBoX Top Right', syn_bboxTR),
    ]

    def __init__(self, *args, **kwargs):
        super(TrPlainTextEdit, self).__init__(*args, **kwargs)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self.__contextMenu)

    def __contextMenu(self):
        self._normalMenu = self.createStandardContextMenu()
        self._addCustomMenuItems(self._normalMenu)
        self._normalMenu.exec_(QtGui.QCursor.pos())

    def _addCustomMenuItems(self, menu):
        def make_inserter(symbol):
            # Bind *symbol* now; a bare lambda in the loop would close over
            # the loop variable and always insert the last symbol.
            return lambda: self.insertPlainText(symbol)

        for entry in self._menu_spec:
            if entry is None:
                menu.addSeparator()
            else:
                label, symbol = entry
                menu.addAction(label, make_inserter(symbol))
        menu.addSeparator()
        menu.addAction('Action: Insert selected glyph names', lambda: self.__add_names())

    def __add_names(self):
        # Insert the names of the currently selected glyphs, space separated.
        active_font = pFont()
        names = [glyph.name for glyph in active_font.selectedGlyphs()]
        self.insertPlainText(' '.join(names))
# - Dialogs --------------------------------
class dlg_glyphComposer(QtGui.QDialog):
    """Main dialog: batch-inserts named elements (shapes) into glyphs.

    The editor accepts one insertion expression per line (see the syn_*
    constants for the grammar); pressing [Execute] parses every line and
    performs the insertions on the currently open font.
    """

    def __init__(self):
        super(dlg_glyphComposer, self).__init__()

        # - Init
        self.active_font = pFont()
        self.class_data = {}

        # - Widgets
        # Layer selector: one specific master, or all masters at once.
        self.cmb_layer = QtGui.QComboBox()
        self.cmb_layer.addItems(self.active_font.masters() + ['All masters'])

        self.btn_saveExpr = QtGui.QPushButton('Save')
        self.btn_loadExpr = QtGui.QPushButton('Load')
        self.btn_exec = QtGui.QPushButton('Execute')

        self.btn_exec.clicked.connect(self.process)
        self.btn_saveExpr.clicked.connect(self.expr_toFile)
        self.btn_loadExpr.clicked.connect(self.expr_fromFile)

        self.txt_editor = TrPlainTextEdit()
        #self.lbl_help = QtGui.QLabel(str_help)
        self.lbl_help = QtGui.QLabel('Help: TODO!')
        self.lbl_help.setWordWrap(True)

        # - Build layouts
        layoutV = QtGui.QGridLayout()
        layoutV.addWidget(QtGui.QLabel('Process font master:'), 2, 0, 1, 2)
        layoutV.addWidget(self.cmb_layer, 2, 2, 1, 2)
        layoutV.addWidget(self.lbl_help, 3, 0, 1, 4)
        layoutV.addWidget(self.txt_editor, 4, 0, 20, 4)
        layoutV.addWidget(self.btn_saveExpr, 24, 0, 1, 2)
        layoutV.addWidget(self.btn_loadExpr, 24, 2, 1, 2)
        layoutV.addWidget(self.btn_exec, 25, 0, 1, 4)

        # - Set Widget
        self.setLayout(layoutV)
        self.setWindowTitle('%s %s' %(app_name, app_version))
        self.setGeometry(300, 300, 250, 500)
        self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) # Always on top!!
        self.show()

    def expr_fromFile(self):
        """Load insertion expressions into the editor from a UTF-8 text file."""
        fontPath = os.path.split(self.active_font.fg.path)[0]
        fname = QtGui.QFileDialog.getOpenFileName(self, 'Load expressions from file', fontPath)
        if fname != None:
            with open(fname, 'r') as importFile:
                self.txt_editor.setPlainText(importFile.read().decode('utf8'))
            print 'LOAD:\t Font:%s; Expressions loaded from: %s.' %(self.active_font.name, fname)

    def expr_toFile(self):
        """Save the editor's insertion expressions to a UTF-8 text file."""
        fontPath = os.path.split(self.active_font.fg.path)[0]
        fname = QtGui.QFileDialog.getSaveFileName(self, 'Save expressions from file', fontPath, '*.txt')
        if fname != None:
            with open(fname, 'w') as importFile:
                importFile.writelines(self.txt_editor.toPlainText().encode('utf-8'))
            print 'SAVE:\t Font:%s; Expressions saved to: %s.' %(self.active_font.name, fname)

    def process(self):
        """Parse every expression line and perform the requested insertions.

        Each line has the form ``[layer ->] element[@position[^dx,dy]] ...
        -> glyph ...``.  For every destination glyph and selected layer the
        named element shape is cloned in and moved so that its origin tag
        lands on the requested position (node label, anchor, bbox corner or
        literal coordinates), optionally shifted by a correction.
        """
        # - Init
        self.active_font = pFont()
        current_glyph = pGlyph()
        getUniGlyph = lambda c: self.active_font.fl.findUnicode(ord(c)).name if all(['uni' not in c, '.' not in c, '_' not in c]) else c
        process_layers = [self.cmb_layer.currentText] if self.cmb_layer.currentText != 'All masters' else self.active_font.masters()

        # - Parse input ------------------------------------------------------------
        for line in self.txt_editor.toPlainText().splitlines():
            # - Init
            process_glyphs = {}
            dst_store, src_store = [], []
            w_layer = syn_passlayer # Pass all commands - no specific layer selected

            if syn_insert in line and syn_comment not in line:
                init_parse = line.split(syn_insert)

                if len(init_parse) == 2: # No specific layer given
                    left, rigth = init_parse
                elif len(init_parse) == 3: # Layer explicitly set
                    w_layer, left, rigth = init_parse
                    w_layer = w_layer.strip()
                else:
                    print 'ERROR:\tInvalid syntax! Skipping Line: %s\n' %line
                    continue

                # - Set basics
                #dst_store = [getUniGlyph(name) if syn_currglyph not in name else current_glyph.name for name in rigth.split()]
                dst_store = [name if syn_currglyph not in name else current_glyph.name for name in rigth.split()]
                # src_temp entries are either [name] or [name, [position, correction]].
                src_temp = [item.strip().split(syn_pos) for item in left.split()]
                src_temp = [[item[0], item[1].split(syn_transform)] if len(item) > 1 else item for item in src_temp]

                process_glyphs = {glyph:src_temp for glyph in dst_store}

            # - Process ------------------------------------------------------------
            for layer in process_layers:
                # - Process only specified layer or all
                if layer == w_layer or w_layer == syn_passlayer:
                    for glyph_name, insert_command in process_glyphs.iteritems():
                        # - Set working glyph
                        w_glyph = self.active_font.glyph(glyph_name)

                        # - Process insertions
                        for insert in insert_command:
                            if len(insert):
                                # - Init
                                # -- Shape retrieval and origin determination
                                if len(insert[0]):
                                    if syn_bboxBL in insert[0]: # Shape origin: measured at Shapes BBox Bottom Left
                                        insert_name = insert[0].replace(syn_bboxBL, '')
                                        w_shape = self.active_font.findShape(insert_name, layer)
                                        insert_origin = Coord(w_shape.boundingBox.x(), w_shape.boundingBox.y())
                                    elif syn_bboxBR in insert[0]: # Shape origin: measured at Shapes BBox Bottom Right
                                        insert_name = insert[0].replace(syn_bboxBR, '')
                                        w_shape = self.active_font.findShape(insert_name, layer)
                                        insert_origin = Coord(w_shape.boundingBox.x() + w_shape.boundingBox.width(), w_shape.boundingBox.y())
                                    elif syn_bboxTL in insert[0]: # Shape origin: measured at Shapes BBox Top Left
                                        insert_name = insert[0].replace(syn_bboxTL, '')
                                        w_shape = self.active_font.findShape(insert_name, layer)
                                        insert_origin = Coord(w_shape.boundingBox.x(), w_shape.boundingBox.y() + w_shape.boundingBox.height())
                                    elif syn_bboxTR in insert[0]: # Shape origin: measured at Shapes BBox Top Right
                                        insert_name = insert[0].replace(syn_bboxTR, '')
                                        w_shape = self.active_font.findShape(insert_name, layer)
                                        # NOTE(review): height()/width() look swapped here
                                        # relative to the BR and TL cases above -- confirm.
                                        insert_origin = Coord(w_shape.boundingBox.x() + w_shape.boundingBox.height(), w_shape.boundingBox.y() + w_shape.boundingBox.width())
                                    elif syn_label in insert[0]: # Shape origin: At source Glyphs Labeled Node
                                        insert_name, node_label = insert[0].split(syn_label)
                                        # Search the whole font for a glyph containing the shape.
                                        for glyph in self.active_font.pGlyphs():
                                            w_shape = glyph.findShape(insert_name, layer)
                                            if w_shape is not None:
                                                insert_origin = Coord(glyph.findNodeCoords(node_label, layer))
                                                break
                                    else: # Shape origin: Not set
                                        insert_name = insert[0]
                                        w_shape = self.active_font.findShape(insert_name, layer)
                                        insert_origin = Coord(0,0)
                                else:
                                    print 'ERROR:\tInvalid command! Skipping insertion command: %s\n' %insert
                                    continue

                                # -- In-glyph positioning
                                insert_position = None
                                if len(insert) == 1: # Position: Simplest case no positional tags
                                    insert_coord = Coord((0,0))
                                else:
                                    if len(insert[1]):
                                        w_bbox = w_glyph.getBounds(layer)
                                        if syn_currnode == insert[1][0]: # Position: Destination Glyphs Currently selected node
                                            position = w_glyph.selectedCoords(layer, applyTransform=True)
                                            insert_position = position[0] if len(position) else None
                                        elif syn_bboxBL == insert[1][0]: # Position: Destination Glyphs BBox Bottom Left
                                            insert_position = (w_bbox.x(), w_bbox.y())
                                        elif syn_bboxBR == insert[1][0]: # Position: Destination Glyphs BBox Bottom Right
                                            insert_position = (w_bbox.x() + w_bbox.width(), w_bbox.y())
                                        elif syn_bboxTL == insert[1][0]: # Position: Destination Glyphs BBox Top Left
                                            insert_position = (w_bbox.x(), w_bbox.y() + w_bbox.height())
                                        elif syn_bboxTR == insert[1][0]: # Position: Destination Glyphs BBox Top Right
                                            # NOTE(review): height()/width() look swapped here as well -- confirm.
                                            insert_position = (w_bbox.x() + w_bbox.height(), w_bbox.y() + w_bbox.width())
                                        elif syn_label in insert[1][0]: # Position: Destination Glyphs Labeled Node
                                            insert_position = w_glyph.findNodeCoords(insert[1][0].strip(syn_label), layer)
                                        elif syn_anchor in insert[1][0]: # Position: Destination Glyphs Anchor
                                            insert_position = w_glyph.findAnchorCoords(insert[1][0].strip(syn_anchor), layer)
                                        elif syn_coordsep in insert[1][0]: # Position: Destination Glyphs Coordinates
                                            # SECURITY: eval() of user-typed text; acceptable in a
                                            # local scripting UI but never for untrusted input.
                                            insert_position = eval('(%s)' %insert[1][0])
                                        if len(insert[1]) > 1: # Positional correction in format (x,y)
                                            insert_correction = Coord(eval('(%s)' %insert[1][1]))
                                        else:
                                            insert_correction = Coord((0,0))
                                    if insert_position is None:
                                        print 'ERROR:\tInvalid positional tags! Skipping insertion command: %s\n' %insert
                                        continue
                                    # - Set insertion coordinates
                                    insert_coord = Coord(insert_position) + insert_correction

                                # - Insert and reposition
                                # !!! A quirky way of adding shapes follows
                                # !!! This is so very wrong - adding the shape twice and removing the first,
                                # !!! forces FL to make a proper clone of the shape!?
                                temp_shape = w_glyph.addShape(w_shape, layer) # A dummy that helps ??!
                                new_shape = w_glyph.addShape(w_shape, layer)
                                w_glyph.layer(layer).removeShape(temp_shape)

                                new_shape.assignStyle(w_shape) # The only way to copy the 'non-spacing' property for now

                                # Translate the clone so its origin tag lands on the target.
                                new_position = insert_coord - insert_origin
                                new_transform = QtGui.QTransform(1, 0, 0, 0, 1, 0, new_position.x, new_position.y, 1)
                                new_shape.transform = new_transform

                                w_glyph.update()
                                #print 'New: %s; Insert: %s; Origin: %s' %(new_position, insert_coord, insert_origin)

                        # - Finish
                        w_glyph.updateObject(w_glyph.fl, 'Shapes inserted to glyph: %s' %w_glyph.name, verbose=False)
                        print 'DONE:\t Glyphs processed: %s' %' '.join(dst_store)

        print 'Done.'
# - RUN ------------------------------
# Instantiate and show the dialog; the module-level name keeps it alive.
dialog = dlg_glyphComposer()
ace3dffe4c9d413905a4802c9dabdbcfef5ea7c9 | 5,371 | py | Python | dallinger/db.py | istresearch/Dallinger | 47e4967ded9e01edbc8c1ae7132c9ec30a87f116 | [
"MIT"
] | 1 | 2020-01-29T04:13:26.000Z | 2020-01-29T04:13:26.000Z | dallinger/db.py | jcpeterson/Dallinger | 55bf00efddb19ab8b7201b65c461996793edf6f4 | [
"MIT"
] | null | null | null | dallinger/db.py | jcpeterson/Dallinger | 55bf00efddb19ab8b7201b65c461996793edf6f4 | [
"MIT"
] | 1 | 2019-02-07T14:16:39.000Z | 2019-02-07T14:16:39.000Z | """Create a connection to the database."""
from contextlib import contextmanager
from functools import wraps
import logging
import os
import sys
from psycopg2.extensions import TransactionRollbackError
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import OperationalError
# Module-level logger for database-session diagnostics.
logger = logging.getLogger('dallinger.db')

# Connection URL: the DATABASE_URL environment variable overrides the
# local-development default.
db_url_default = "postgresql://dallinger:dallinger@localhost/dallinger"
db_url = os.environ.get("DATABASE_URL", db_url_default)
engine = create_engine(db_url, pool_size=1000)
session_factory = sessionmaker(
    autocommit=False,
    autoflush=True,
    bind=engine,
)
# Thread-local registry: `session` proxies to the current thread's session.
session = scoped_session(session_factory)

Base = declarative_base()
# Let model classes issue queries directly via Model.query.
Base.query = session.query_property()
db_user_warning = """
*********************************************************
*********************************************************
Dallinger now requires a database user named "dallinger".
Run:
createuser -P dallinger --createdb
Consult the developer guide for more information.
*********************************************************
*********************************************************
"""
@contextmanager
def sessions_scope(local_session, commit=False):
    """Provide a transactional scope around a series of operations.

    :param local_session: the scoped-session registry to manage.
    :param commit: when True, commit on successful exit; otherwise the
        caller remains responsible for committing.

    On any exception the session is rolled back and the exception is
    re-raised; the session registry is always cleaned up on exit.
    """
    try:
        yield local_session
        if commit:
            local_session.commit()
            logger.debug('DB session auto-committed as requested')
    except Exception as e:
        # We log the exception before re-raising it, in case the rollback also
        # fails
        logger.exception('Exception during scoped worker transaction, '
                         'rolling back.')
        # This rollback is potentially redundant with the remove call below,
        # depending on how the scoped session is configured, but we'll be
        # explicit here.
        local_session.rollback()
        raise e
    finally:
        local_session.remove()
        logger.debug('Session complete, db session closed')
def scoped_session_decorator(func):
    """Run the wrapped callable inside a scoped database session.

    The session used by *func* comes from its globals, but since that is a
    thread-local proxy from the session registry it is identical to the
    one managed by the surrounding ``sessions_scope``.
    """
    @wraps(func)
    def scoped_call(*args, **kwargs):
        with sessions_scope(session):
            logger.debug(
                'Running worker %s in scoped DB session', func.__name__)
            return func(*args, **kwargs)
    return scoped_call
def init_db(drop_all=False, bind=engine):
    """Initialize the database, optionally dropping existing tables.

    Returns the scoped session.  If authentication as the "dallinger"
    database user fails, a setup hint is printed to stderr before the
    original error is re-raised.
    """
    try:
        if drop_all:
            Base.metadata.drop_all(bind=bind)
        Base.metadata.create_all(bind=bind)
    except OperationalError as err:
        msg = 'password authentication failed for user "dallinger"'
        # FIX: use str(err) rather than err.message -- the `.message`
        # attribute does not exist on exceptions under Python 3.
        if msg in str(err):
            sys.stderr.write(db_user_warning)
        raise

    return session
def serialized(func):
    """Run a function within a db transaction using SERIALIZABLE isolation.

    With this isolation level, committing will fail if this transaction
    read data that was since modified by another transaction. So we need
    to handle that case and retry the transaction.
    """
    @wraps(func)
    def wrapper(*args, **kw):
        attempts = 100
        session.remove()
        while attempts > 0:
            try:
                session.connection(
                    execution_options={'isolation_level': 'SERIALIZABLE'})
                result = func(*args, **kw)
                session.commit()
                return result
            except OperationalError as exc:
                session.rollback()
                if isinstance(exc.orig, TransactionRollbackError):
                    # Serialization failure: another transaction won; retry.
                    attempts -= 1
                else:
                    raise
            finally:
                session.remove()
        # BUG FIX: the original decremented `attempts` inside an
        # always-true `if attempts > 0` guard, making its raise branch
        # unreachable -- after the final retry the loop simply exited and
        # the wrapper silently returned None.  Raise explicitly instead.
        raise Exception(
            'Could not commit serialized transaction '
            'after 100 attempts.')
    return wrapper
# Reset outbox when session begins
@event.listens_for(Session, 'after_begin')
def after_begin(session, transaction, connection):
    """Reset the message outbox whenever a new transaction begins."""
    session.info['outbox'] = []
# Reset outbox after rollback
@event.listens_for(Session, 'after_soft_rollback')
def after_soft_rollback(session, previous_transaction):
    """Discard any queued (unpublished) messages when a rollback occurs."""
    session.info['outbox'] = []
def queue_message(channel, message):
    """Stage *message* in the session outbox for publication after commit."""
    logger.debug(
        'Enqueueing message to {}: {}'.format(channel, message))
    # setdefault creates the outbox on first use, then appends either way.
    session.info.setdefault('outbox', []).append((channel, message))
# Publish messages to redis after commit
@event.listens_for(Session, 'after_commit')
def after_commit(session):
    """Publish all queued outbox messages to redis after a successful commit."""
    # NOTE(review): import is local, presumably to avoid an import cycle with
    # dallinger.heroku.worker — confirm against that module.
    from dallinger.heroku.worker import conn as redis
    for channel, message in session.info.get('outbox', ()):
        logger.debug(
            'Publishing message to {}: {}'.format(channel, message))
        redis.publish(channel, message)
| 31.409357 | 78 | 0.624092 |
ace3e015edafa11cbb6589ad6aa5417cae1d3edb | 1,529 | py | Python | tilenol/xcb/keysymparse.py | tailhook/tilenol | 3b71f6600d437a4e5f167315683e7f0137cd3788 | [
"MIT"
] | 42 | 2015-01-19T15:43:16.000Z | 2021-09-19T15:20:58.000Z | tilenol/xcb/keysymparse.py | tailhook/tilenol | 3b71f6600d437a4e5f167315683e7f0137cd3788 | [
"MIT"
] | 2 | 2015-05-30T03:15:17.000Z | 2019-02-28T16:37:02.000Z | tilenol/xcb/keysymparse.py | tailhook/tilenol | 3b71f6600d437a4e5f167315683e7f0137cd3788 | [
"MIT"
] | 11 | 2015-10-04T06:01:02.000Z | 2022-03-26T18:46:46.000Z | import os
import re
import logging
log = logging.getLogger(__name__)
keysym_re = re.compile(
r"^#define\s+(XF86)?XK_(\w+)\s+"
r"(?:(0x[a-fA-F0-9]+)|_EVDEV\((0x[0-9a-fA-F]+)\))"
)
class Keysyms(object):
__slots__ = ('name_to_code', 'code_to_name', '__dict__')
    def __init__(self):
        """Create empty forward and reverse keysym lookup tables."""
        # name_to_code: keysym name -> numeric code
        self.name_to_code = {}
        # code_to_name: numeric code -> keysym name (last writer wins)
        self.code_to_name = {}
def add_from_file(self, filename):
with open(filename, 'rt') as f:
for line in f:
m = keysym_re.match(line)
if not m:
continue
name = (m.group(1) or '') + m.group(2)
if m.group(3):
try:
code = int(m.group(3), 0)
except ValueError:
log.warn("Bad code %r for key %r", code, name)
continue
elif m.group(4):
try:
code = int(m.group(4), 0)
except ValueError:
log.warn("Bad code %r for evdev key %r", code, name)
continue
else:
continue
self.__dict__[name] = code
self.name_to_code[name] = code
self.code_to_name[code] = name
def load_default(self):
xproto_dir = os.environ.get("XPROTO_DIR", "/usr/include/X11")
self.add_from_file(xproto_dir + '/keysymdef.h')
self.add_from_file(xproto_dir + '/XF86keysym.h')
| 29.980392 | 76 | 0.470896 |
ace3e07287498df81825b4cce83de8d362eb240c | 17,259 | py | Python | tlib/es/index_stress.py | txu2008/TXLIB | 402f3730e8710803675736e0f2e25719cd646409 | [
"MIT"
] | 1 | 2019-08-08T05:06:28.000Z | 2019-08-08T05:06:28.000Z | tlib/es/index_stress.py | txu2008/TXLIB | 402f3730e8710803675736e0f2e25719cd646409 | [
"MIT"
] | 1 | 2020-03-24T05:53:20.000Z | 2020-03-24T05:53:20.000Z | tlib/es/index_stress.py | txu2008/TXLIB | 402f3730e8710803675736e0f2e25719cd646409 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/11 14:02
# @Author : Tao.Xu
# @Email : tao.xu2008@outlook.com
"""Elasticsearch Stress
FYI: https://github.com/logzio/elasticsearch-stress-test
"""
import random
import string
import time
import sys
import json
# import urllib3
import threading
from threading import Lock, Thread, Condition, Event
from concurrent.futures import ThreadPoolExecutor, as_completed
from elasticsearch import Elasticsearch
from elasticsearch.connection import create_ssl_context
from elasticsearch.exceptions import TransportError
from tlib import log
from tlib.retry import retry
from tlib.utils import util
# =============================
# --- Global
# =============================
logger = log.get_logger()
# urllib3.disable_warnings()
ES_CONN_TIMEOUT = 10800 # 180 min = 180 * 60 = 10800
ES_OPERATION_TIMEOUT = '180m'
class ElasticsearchObj(object):
    """Elasticsearch helper: lazy connection plus retrying index operations."""
    # Lazily created shared connection; see the ``conn`` property below.
    _conn = None
    def __init__(self, esaddress, username, password, port, cafile, no_verify):
        """Store connection parameters; no network I/O happens here."""
        super(ElasticsearchObj, self).__init__()
        self.esaddress = esaddress
        self.username = username
        self.password = password
        self.port = port
        # cafile: optional CA bundle path; falsy value skips the SSL context
        self.cafile = cafile
        # no_verify: True disables certificate verification
        self.no_verify = no_verify
    @retry(tries=5, delay=3)
    def connect(self):
        """
        Initiate the elasticsearch session, We increase the timeout here from the default value (10 seconds)
        to ensure we wait for requests to finish even if the cluster is overwhelmed and
        it takes a bit longer to process one bulk.
        :return: a connected Elasticsearch client (retried up to 5 times)
        """
        try:
            logger.info(
                "Connect to ES({0},{1},{2},{3})...".format(self.esaddress, self.username, self.password, self.port))
            context = create_ssl_context(cafile=self.cafile) if self.cafile else ''
            auth = (self.username, self.password) if self.username and self.password else ()
            es_conn = Elasticsearch(self.esaddress, http_auth=auth, verify_certs=(not self.no_verify),
                                    ssl_context=context, port=self.port, timeout=ES_CONN_TIMEOUT)
            return es_conn
        except Exception as e:
            raise Exception("Failed:Connect to ES!\n{0}".format(e))
    @property
    def conn(self):
        """Lazily create and cache the Elasticsearch connection."""
        if self._conn is None:
            self._conn = self.connect()
        return self._conn
    def get_cat_index_info(self, index_name=None):
        """Return parsed ``_cat/indices`` output.

        With ``index_name`` set: a single header->value dict for that index.
        With ``index_name`` None: a list of such dicts, one per index.
        """
        cat_result_list = self.conn.cat.indices(index=index_name, v=True).split('\n')
        index_info = dict()
        if cat_result_list:
            if index_name is None:
                # First row is the header line; zip it with each data row.
                index_info = []
                for i in range(1, len(cat_result_list)):
                    index_info.append(dict(zip(cat_result_list[0].split(), cat_result_list[i].split())))
            else:
                index_info = dict(zip(cat_result_list[0].split(), cat_result_list[1].split()))
        return index_info
    @property
    def es_indices_names(self):
        """Return the index-name column (3rd field) of ``_cat/indices``."""
        # return [es_indices.split()[2] for es_indices in self.conn.cat.indices().strip().split('\n')]
        es_indices_names = []
        for es_indices in self.conn.cat.indices().strip().split('\n'):
            es_indices_info = es_indices.split()
            # Guard against short/malformed rows before indexing column 2.
            if len(es_indices_info) > 3:
                es_indices_names.append(es_indices_info[2])
        return es_indices_names
    @retry(tries=3, delay=3, jitter=1, raise_exception=False)
    def delete_indices(self, index):
        """
        delete index from indices (missing/invalid indices are ignored)
        :param index: index name to delete
        :return: True on success
        """
        try:
            logger.info("Delete indices:{0} ...".format(index))
            self.conn.indices.delete(index=index, ignore=[400, 404])
            return True
        except Exception as e:
            raise Exception("Failed:delete index {0}. Continue anyway..\n{1}".format(index, e))
    @retry(tries=20, delay=3, jitter=1)
    def create_indices(self, index, shards, replicas):
        """Create *index* with the given shard/replica counts; tolerate 'exists'."""
        try:
            # And create it in ES with the shard count and replicas
            logger.info("Create indices:index={0},shards={1}, replicas={2} ...".format(index, shards, replicas))
            self.conn.indices.create(index=index, body={"settings": {"number_of_shards": shards,
                                                                     "number_of_replicas": replicas}})
            return True
        except TransportError as e:
            # An already-existing index is treated as success.
            if 'exists' in e.error:
                logger.warning(e)
                return True
            raise Exception("Failed:Create index!\n{0}".format(e))
    def multi_delete_indices(self, index_list, name_start=None):
        """Delete indices concurrently; optionally filter by name prefix."""
        pool = ThreadPoolExecutor(max_workers=100)
        futures = []
        for index in index_list:
            if name_start and not index.startswith(name_start):
                continue
            futures.append(pool.submit(self.delete_indices, index))
        pool.shutdown()
        future_result = [future.result() for future in as_completed(futures)]
        result = False if False in future_result else True
        return result
    def multi_create_indices(self, index_list, shards, replicas):
        """Create all indices in *index_list* concurrently; True if all succeed."""
        pool = ThreadPoolExecutor(max_workers=100)
        futures = []
        for index in index_list:
            futures.append(pool.submit(self.create_indices, index, shards, replicas))
        pool.shutdown()
        future_result = [future.result() for future in as_completed(futures)]
        result = False if False in future_result else True
        return result
    @retry(tries=30, delay=10)
    def wait_for_green(self):
        """Block until the cluster reports green health (retried up to 30x)."""
        try:
            self.conn.cluster.health(wait_for_status='green', master_timeout='600s', timeout='600s')
            return True
        except Exception as e:
            raise Exception(e)
class ESIndexStress(ElasticsearchObj):
    """
    Elasticsearch Stress
    FYI: https://github.com/logzio/elasticsearch-stress-test
    """
    def __init__(self, esaddress, username, password, port, cafile, no_verify, indices, documents, clients, seconds,
                 number_of_shards, number_of_replicas, bulk_size, max_fields_per_document, max_size_per_field, cleanup,
                 stats_frequency, green, index_name=None):
        """Record test parameters and initialize counters, locks and events."""
        super(ESIndexStress, self).__init__(esaddress, username, password, port, cafile, no_verify)
        self.esaddress = esaddress
        self.indices = indices
        self.documents = documents
        self.clients = clients
        self.seconds = seconds
        self.number_of_shards = number_of_shards
        self.number_of_replicas = number_of_replicas
        self.bulk_size = bulk_size
        self.max_fields_per_document = max_fields_per_document
        self.max_size_per_field = max_size_per_field
        self.cleanup = cleanup  # cleanup index after test complete, if True
        self.stats_frequency = stats_frequency
        self.green = green
        self.index_name = index_name
        # Placeholders
        self.start_timestamp = 0
        self.success_bulks = 0
        self.failed_bulks = 0
        self.total_size = 0
        # Thread safe
        self.success_lock = Lock()
        self.fail_lock = Lock()
        self.size_lock = Lock()
        self.shutdown_event = Event()
    # Helper functions
    def increment_success(self):
        """Thread-safely bump the successful-bulk counter."""
        # First, lock
        self.success_lock.acquire()
        try:
            self.success_bulks += 1
        finally: # Just in case
            # Release the lock
            self.success_lock.release()
    def increment_failure(self):
        """Thread-safely bump the failed-bulk counter."""
        # First, lock
        self.fail_lock.acquire()
        try:
            self.failed_bulks += 1
        finally: # Just in case
            # Release the lock
            self.fail_lock.release()
    def increment_size(self, size):
        """Thread-safely add *size* bytes to the running total."""
        # First, lock
        self.size_lock.acquire()
        try:
            self.total_size += size
        finally: # Just in case
            # Release the lock
            self.size_lock.release()
    def has_timeout(self, start_timestamp):
        """Return True once self.seconds have elapsed since *start_timestamp*."""
        # Match to the timestamp
        if (start_timestamp + self.seconds) > int(time.time()):
            return False
        return True
    # Just to control the minimum value globally (though its not configurable)
    @staticmethod
    def generate_random_int(max_size):
        """Return a random int in [1, max_size]; re-raise on invalid sizes."""
        try:
            return random.randint(1, max_size)
        except Exception as e:
            print("Not supporting {0} as valid sizes!".format(max_size))
            raise e
    # Generate a random string with length of 1 to provided param
    def generate_random_string(self, max_size):
        """Return a random lowercase string of length 1..max_size."""
        return ''.join(random.choice(string.ascii_lowercase) for _ in range(self.generate_random_int(max_size)))
    # Create a document template
    def generate_document(self):
        """Return a template dict with random field names and random values."""
        temp_doc = {}
        # Iterate over the max fields
        for _ in range(self.generate_random_int(self.max_fields_per_document)):
            # Generate a field, with random content
            temp_doc[self.generate_random_string(10)] = self.generate_random_string(self.max_size_per_field)
        # Return the created document
        return temp_doc
    def fill_documents(self, documents_templates):
        """
        fill document with random string from template
        :param documents_templates: list of template dicts
        :return: list of 10 documents with freshly randomized values
        """
        document_list = []
        # Generating 10 random subsets
        for _ in range(10):
            # Get a temp document
            random_doc = random.choice(documents_templates)
            # Populate the fields
            temp_doc = {}
            for field in random_doc:
                temp_doc[field] = self.generate_random_string(self.max_size_per_field)
            document_list.append(temp_doc)
        return document_list
    def client_worker(self, indices, document_list):
        """Worker loop: send random bulk requests until timeout or shutdown."""
        # Running until timeout
        thread_id = threading.current_thread()
        logger.info("Perform the bulk operation, bulk_size:{0} ({1})...".format(self.bulk_size, thread_id))
        while (not self.has_timeout(self.start_timestamp)) and (not self.shutdown_event.is_set()):
            curr_bulk = ""
            # Iterate over the bulk size
            for _ in range(self.bulk_size):
                # Generate the bulk operation
                curr_bulk += "{0}\n".format(json.dumps({"index": {"_index": random.choice(indices),
                                                                 "_type": "stresstest"}}))
                curr_bulk += "{0}\n".format(json.dumps(random.choice(document_list)))
            try:
                # Perform the bulk operation
                self.conn.bulk(body=curr_bulk, timeout=ES_OPERATION_TIMEOUT)
                # Adding to success bulks
                self.increment_success()
                # Adding to size (in bytes)
                self.increment_size(sys.getsizeof(str(curr_bulk)))
            except Exception as e:
                # Failed. incrementing failure
                self.increment_failure()
                logger.error(e)
    def generate_clients(self, indices, document_list):
        """Create (but do not start) one daemon worker thread per client."""
        # Clients placeholder
        temp_clients = []
        # Iterate over the clients count
        for _ in range(self.clients):
            temp_thread = Thread(target=self.client_worker, args=[indices, document_list])
            temp_thread.daemon = True
            # Create a thread and push it to the list
            temp_clients.append(temp_thread)
        # Return the clients
        return temp_clients
    def generate_documents(self):
        """Return self.documents freshly generated document templates."""
        # Documents placeholder
        temp_documents = []
        # Iterate over the clients count
        for _ in range(self.documents):
            # Create a document and push it to the list
            temp_documents.append(self.generate_document())
        # Return the documents
        return temp_documents
    def generate_indices(self):
        """Generate index names, create them in ES, and return the names."""
        # Placeholder
        temp_indices = []
        # Iterate over the indices count
        for x in range(self.indices):
            # Generate the index name
            temp_index = '{0}_{1}'.format(self.index_name, x) if self.index_name else self.generate_random_string(16)
            temp_indices.append(temp_index)
        self.multi_create_indices(temp_indices, self.number_of_shards, self.number_of_replicas)
        return temp_indices
    def print_stats(self):
        """Log elapsed time, bulk counts and indexing throughput so far."""
        # Calculate elpased time
        elapsed_time = (int(time.time()) - self.start_timestamp)
        # Calculate size in MB
        size_mb = self.total_size / 1024 / 1024
        # Protect division by zero
        if elapsed_time == 0:
            mbs = 0
        else:
            mbs = size_mb / float(elapsed_time)
        # Print stats to the user
        logger.info("Elapsed time: {0} seconds".format(elapsed_time))
        logger.info("Successful bulks: {0} ({1} documents)".format(self.success_bulks, (self.success_bulks * self.bulk_size)))
        logger.info("Failed bulks: {0} ({1} documents)".format(self.failed_bulks, (self.failed_bulks * self.bulk_size)))
        logger.info("Indexed approximately {0} MB which is {1:.2f} MB/s".format(size_mb, mbs))
        logger.info("")
    def print_stats_worker(self):
        """Background loop that logs stats every stats_frequency seconds."""
        # Create a conditional lock to be used instead of sleep (prevent dead locks)
        lock = Condition()
        # Acquire it
        lock.acquire()
        # Print the stats every STATS_FREQUENCY seconds
        while (not self.has_timeout(self.start_timestamp)) and (not self.shutdown_event.is_set()):
            # Wait for timeout
            lock.wait(self.stats_frequency)
            # To avoid double printing
            if not self.has_timeout(self.start_timestamp):
                # Print stats
                self.print_stats()
    def run(self):
        """Drive the whole stress test: setup, workers, stats, and cleanup."""
        clients = []
        all_indices = []
        # Set the timestamp
        self.start_timestamp = int(time.time())
        logger.info("")
        logger.info("Starting initialization of {0} ...".format(self.esaddress))
        logger.info("Generate docs ...")
        documents_templates = self.generate_documents()
        document_list = self.fill_documents(documents_templates)
        logger.info("Done!")
        logger.info("Creating indices.. ")
        indices = self.generate_indices()
        all_indices.extend(indices)
        logger.info("Done!")
        if self.green:
            logger.info('Check es cluster health ...')
            self.wait_for_green()
            logger.info("Done!")
        logger.info("Generating documents and workers.. ") # Generate the clients
        clients.extend(self.generate_clients(indices, document_list))
        logger.info("Done!")
        logger.info("Starting the test. Will print stats every {0} seconds.".format(self.stats_frequency))
        logger.info("The test would run for {0} seconds, but it might take a bit more "
                    "because we are waiting for current bulk operation to complete.".format(self.seconds))
        original_active_count = threading.active_count()
        # Run the clients!
        for d in clients:
            d.start()
        # Create and start the print stats thread
        stats_thread = Thread(target=self.print_stats_worker)
        stats_thread.daemon = True
        stats_thread.start()
        for c in clients:
            while c.is_alive():
                try:
                    c.join(timeout=0.1)
                except KeyboardInterrupt:
                    logger.info("")
                    logger.info("Ctrl-c received! Sending kill to threads...")
                    self.shutdown_event.set()
                    # set loop flag true to get into loop
                    flag = True
                    while flag:
                        # sleep 2 secs that we don't loop to often
                        time.sleep(2)
                        '''
                        # set loop flag to false. If there is no thread still alive it will stay false
                        flag = False
                        # loop through each running thread and check if it is alive
                        for t in threading.enumerate():
                            # if one single thread is still alive repeat the loop
                            if t.isAlive():
                                flag = True
                        '''
                        # wait the bulk threads complete!
                        bulk_active_count = threading.active_count() - original_active_count
                        if bulk_active_count > 0:
                            print('bulk_active_count: {0}'.format(bulk_active_count))
                            flag = True
                        else:
                            flag = False
                    if self.cleanup:
                        logger.info("Cleaning up created indices.. ")
                        self.multi_delete_indices(all_indices)
        logger.info('')
        logger.info("Test is done! Final results:")
        self.print_stats()
        if self.cleanup:
            logger.info("Cleaning up created indices.. ")
            self.multi_delete_indices(all_indices)
        logger.info("Done!")
| 37.357143 | 126 | 0.596558 |
ace3e0b96b91d3a2daf52827de0d2b2bc960b01d | 522 | py | Python | plotly/validators/histogram/_autobinx.py | paulamool/plotly.py | 6121ac1f324e247e4e4b2964d65d7393377777c0 | [
"MIT"
] | 1 | 2020-08-26T03:30:36.000Z | 2020-08-26T03:30:36.000Z | plotly/validators/histogram/_autobinx.py | paulamool/plotly.py | 6121ac1f324e247e4e4b2964d65d7393377777c0 | [
"MIT"
] | 1 | 2020-12-15T16:56:11.000Z | 2020-12-15T16:56:11.000Z | plotly/validators/histogram/_autobinx.py | skeptycal/plotly.py | 2e5bf6e2f7c213295c405ece3e859f4d3f8030d1 | [
"MIT"
] | 1 | 2019-02-14T05:18:20.000Z | 2019-02-14T05:18:20.000Z | import _plotly_utils.basevalidators
class AutobinxValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the histogram trace ``autobinx`` attribute."""

    def __init__(self, plotly_name='autobinx', parent_name='histogram', **kwargs):
        # Pop validator metadata out of kwargs first so the remainder can be
        # forwarded untouched to the base BooleanValidator.
        edit_type = kwargs.pop('edit_type', 'calc')
        implied_edits = kwargs.pop('implied_edits', {})
        role = kwargs.pop('role', 'style')
        super(AutobinxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
| 30.705882 | 71 | 0.632184 |
ace3e0d11841a0e1cdaeed7d32a364e701147722 | 39,846 | py | Python | BaseTools/Source/Python/UPT/Library/Parsing.py | christopherco/RPi-UEFI | 48fd8bb20dd4d45a4cf0a8970a65837e45bbaa99 | [
"BSD-2-Clause"
] | 93 | 2016-10-27T12:03:57.000Z | 2022-03-29T15:22:10.000Z | BaseTools/Source/Python/UPT/Library/Parsing.py | khezami/RPi-UEFI | 5bfd48d674e6c7efea6e31f9eb97b9da90c20263 | [
"BSD-2-Clause"
] | 16 | 2016-11-02T02:08:40.000Z | 2021-06-03T21:18:06.000Z | BaseTools/Source/Python/UPT/Library/Parsing.py | JayLeeCompal/EDKII_Git | de4800d50e1f357002bf77235d3bebabd0c00007 | [
"MIT"
] | 41 | 2016-11-02T00:05:02.000Z | 2022-03-29T14:33:09.000Z | ## @file
# This file is used to define common parsing related functions used in parsing
# INF/DEC/DSC process
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
Parsing
'''
##
# Import Modules
#
import os.path
import re
from Library.String import RaiseParserError
from Library.String import GetSplitValueList
from Library.String import CheckFileType
from Library.String import CheckFileExist
from Library.String import CleanString
from Library.String import NormPath
from Logger.ToolError import FILE_NOT_FOUND
from Logger.ToolError import FatalError
from Logger.ToolError import FORMAT_INVALID
from Library import DataType
from Library.Misc import GuidStructureStringToGuidString
from Library.Misc import CheckGuidRegFormat
from Logger import StringTable as ST
import Logger.Log as Logger
from Parser.DecParser import Dec
import GlobalData
gPKG_INFO_DICT = {}
## GetBuildOption
#
# Parse a string with format "[<Family>:]<ToolFlag>=Flag"
# Return (Family, ToolFlag, Flag)
#
# @param String: String with BuildOption statement
# @param File: The file which defines build option, used in error report
#
def GetBuildOption(String, File, LineNo= -1):
    """Parse '[<Family>:]<ToolFlag>=Flag' and return (Family, ToolChain, Flag).

    Raises a parser error (via RaiseParserError) when no '=' is present.
    """
    Family = ''
    ToolChain = ''
    Flag = ''
    if String.find(DataType.TAB_EQUAL_SPLIT) < 0:
        RaiseParserError(String, 'BuildOptions', File,
                         '[<Family>:]<ToolFlag>=Flag', LineNo)
    else:
        TokenList = GetSplitValueList(String, DataType.TAB_EQUAL_SPLIT, MaxSplit=1)
        ColonPos = TokenList[0].find(':')
        if ColonPos > -1:
            # A ':' separates the optional tool family from the tool flag.
            Family = TokenList[0][:ColonPos].strip()
            ToolChain = TokenList[0][ColonPos + 1:].strip()
        else:
            ToolChain = TokenList[0].strip()
        Flag = TokenList[1].strip()
    return (Family, ToolChain, Flag)
## Get Library Class
#
# Get Library of Dsc as <LibraryClassKeyWord>|<LibraryInstance>
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1):
    """Parse '<LibraryClassKeyWord>|<LibraryInstance>' from a DSC file.

    Item is a (value-string, supported-module-list) pair; returns a tuple of
    (class keyword, instance path, supported module types).
    """
    ValueList = GetSplitValueList(Item[0])
    SupMod = DataType.SUP_MODULE_LIST_STRING
    if len(ValueList) != 2:
        RaiseParserError(Item[0], 'LibraryClasses', ContainerFile,
                         '<LibraryClassKeyWord>|<LibraryInstance>')
    else:
        # Instance must be an existing .inf file under the workspace.
        CheckFileType(ValueList[1], '.Inf', ContainerFile,
                      'library class instance', Item[0], LineNo)
        CheckFileExist(WorkspaceDir, ValueList[1], ContainerFile,
                       'LibraryClasses', Item[0], LineNo)
        if Item[1] != '':
            SupMod = Item[1]
    return (ValueList[0], ValueList[1], SupMod)
## Get Library Class
#
# Get Library of Dsc as <LibraryClassKeyWord>[|<LibraryInstance>]
# [|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo= -1):
    """Parse '<LibraryClassKeyWord>[|<LibraryInstance>][|<TokenSpaceGuidCName>.<PcdCName>]'.

    Returns (class keyword, instance path, pcd token info, supported modules).
    """
    # Pad with two separators so optional fields always index safely.
    ItemList = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2))
    SupMod = DataType.SUP_MODULE_LIST_STRING
    if len(ItemList) > 5:
        # NOTE(review): the backslash continuation inside the string literal
        # embeds the next line's leading whitespace in the error message --
        # confirm this formatting is intended.
        RaiseParserError\
        (Item[0], 'LibraryClasses', ContainerFile, \
        '<LibraryClassKeyWord>[|<LibraryInstance>]\
        [|<TokenSpaceGuidCName>.<PcdCName>]')
    else:
        CheckFileType(ItemList[1], '.Inf', ContainerFile, 'LibraryClasses', \
                      Item[0], LineNo)
        CheckFileExist(WorkspaceDir, ItemList[1], ContainerFile, \
                       'LibraryClasses', Item[0], LineNo)
        if ItemList[2] != '':
            CheckPcdTokenInfo(ItemList[2], 'LibraryClasses', \
                              ContainerFile, LineNo)
        if Item[1] != '':
            SupMod = Item[1]
    return (ItemList[0], ItemList[1], ItemList[2], SupMod)
## CheckPcdTokenInfo
#
# Check if PcdTokenInfo is following <TokenSpaceGuidCName>.<PcdCName>
#
# @param TokenInfoString: String to be checked
# @param Section: Used for error report
# @param File: Used for error report
#
def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo= -1):
    """Return True if TokenInfoString is '<TokenSpaceGuidCName>.<PcdCName>';
    otherwise raise a parser error via RaiseParserError.
    """
    Format = '<TokenSpaceGuidCName>.<PcdCName>'
    # Empty string and None both fail validation and fall through to the error.
    if TokenInfoString:
        if len(GetSplitValueList(TokenInfoString, DataType.TAB_SPLIT)) == 2:
            return True
    RaiseParserError(TokenInfoString, Section, File, Format, LineNo)
## Get Pcd
#
# Get Pcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>
# [|<Type>|<MaximumDatumSize>]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <Value>[|<Type>|<MaximumDatumSize>]
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetPcd(Item, Type, ContainerFile, LineNo= -1):
    """Parse '<TokenSpaceGuidCName>.<TokenCName>|<Value>[|<Type>|<MaximumDatumSize>]'.

    Returns (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type).
    """
    TokenGuid, TokenName, Value, MaximumDatumSize, Token = '', '', '', '', ''
    # Pad with two separators so optional fields always index safely.
    List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(List) < 4 or len(List) > 6:
        # NOTE(review): the backslash continuation inside the string literal
        # embeds the next line's leading whitespace in the error message --
        # confirm this formatting is intended.
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
                         [|<Type>|<MaximumDatumSize>]', LineNo)
    else:
        Value = List[1]
        MaximumDatumSize = List[2]
        Token = List[3]
        if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
            (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type)
## Get FeatureFlagPcd
#
# Get FeatureFlagPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
#
# @param Item: String as <PcdTokenSpaceGuidCName>
# .<TokenCName>|TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1):
    """Parse '<TokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE' feature PCDs.

    Returns (TokenName, TokenGuid, Value, Type).
    """
    TokenGuid = ''
    TokenName = ''
    Value = ''
    TokenList = GetSplitValueList(Item)
    if len(TokenList) != 2:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile,
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE',
                         LineNo)
    else:
        Value = TokenList[1]
        if CheckPcdTokenInfo(TokenList[0], 'Pcds' + Type, ContainerFile, LineNo):
            TokenGuid, TokenName = GetSplitValueList(TokenList[0], DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, Value, Type)
## Get DynamicDefaultPcd
#
# Get DynamicDefaultPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>
# |<Value>[|<DatumTyp>[|<MaxDatumSize>]]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1):
    """Parse '<TokenSpaceGuidCName>.<TokenCName>|<Value>[|<DatumTyp>[|<MaxDatumSize>]]'.

    Returns (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type).
    """
    TokenGuid, TokenName, Value, DatumTyp, MaxDatumSize = '', '', '', '', ''
    # Pad with two separators so optional fields always index safely.
    List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(List) < 4 or len(List) > 8:
        # NOTE(review): the backslash continuation inside the string literal
        # embeds the next line's leading whitespace in the error message --
        # confirm this formatting is intended.
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
                         [|<DatumTyp>[|<MaxDatumSize>]]', LineNo)
    else:
        Value = List[1]
        DatumTyp = List[2]
        MaxDatumSize = List[3]
        if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
            (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type)
## Get DynamicHiiPcd
#
# Get DynamicHiiPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|
# <VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1):
    """Parse a HII dynamic PCD entry:
    '<TokenSpaceGuidCName>.<TokenCName>|<String>|<VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]'.

    Returns (TokenName, TokenGuid, List1..List5, Type).
    """
    TokenGuid, TokenName, List1, List2, List3, List4, List5 = \
    '', '', '', '', '', '', ''
    # Pad with two separators so optional fields always index safely.
    List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(List) < 6 or len(List) > 8:
        # NOTE(review): the backslash continuation inside the string literal
        # embeds the next line's leading whitespace in the error message --
        # confirm this formatting is intended.
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<String>|\
                         <VariableGuidCName>|<VariableOffset>[|<DefaultValue>\
                         [|<MaximumDatumSize>]]', LineNo)
    else:
        List1, List2, List3, List4, List5 = \
        List[1], List[2], List[3], List[4], List[5]
        if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
            (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, List1, List2, List3, List4, List5, Type)
## Get DynamicVpdPcd
#
# Get DynamicVpdPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <VpdOffset>[|<MaximumDatumSize>]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>
# |TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1):
    """Parse '<TokenSpaceGuidCName>.<TokenCName>|<VpdOffset>[|<MaximumDatumSize>]'.

    Returns (TokenName, TokenGuid, List1, List2, Type).
    """
    TokenGuid, TokenName, List1, List2 = '', '', '', ''
    # Pad with one separator so the optional field always indexes safely.
    List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    if len(List) < 3 or len(List) > 4:
        # NOTE(review): the backslash continuation inside the string literal
        # embeds the next line's leading whitespace in the error message --
        # confirm this formatting is intended.
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>\
                         [|<MaximumDatumSize>]', LineNo)
    else:
        List1, List2 = List[1], List[2]
        if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
            (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, List1, List2, Type)
## GetComponent
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3],
# [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed
# @param KeyValues: To store data after parsing
#
def GetComponent(Lines, KeyValues):
    """Parse a DSC [Components] block into KeyValues.

    Lines is a sequence whose first element per entry is the text line.
    Appends [component name, [library classes], [build options], [pcds]]
    entries to KeyValues. State machine: a trailing '{' opens a block,
    '<Section>' markers select which sub-list subsequent lines go to,
    and a trailing '}' closes the block.
    """
    (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
    FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
    FindPcdsDynamicEx) = (False, False, False, False, False, False, False, \
    False)
    ListItem = None
    LibraryClassItem = []
    BuildOption = []
    Pcd = []
    for Line in Lines:
        Line = Line[0]
        #
        # Ignore !include statement
        #
        if Line.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1 or \
            Line.upper().find(DataType.TAB_DEFINE + ' ') > -1:
            continue
        if FindBlock == False:
            ListItem = Line
            #
            # find '{' at line tail
            #
            if Line.endswith('{'):
                FindBlock = True
                ListItem = CleanString(Line.rsplit('{', 1)[0], \
                                       DataType.TAB_COMMENT_SPLIT)
        #
        # Parse a block content
        #
        if FindBlock:
            if Line.find('<LibraryClasses>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (True, False, False, False, False, False, False)
                continue
            if Line.find('<BuildOptions>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, True, False, False, False, False, False)
                continue
            if Line.find('<PcdsFeatureFlag>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, True, False, False, False, False)
                continue
            if Line.find('<PcdsPatchableInModule>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, True, False, False, False)
                continue
            if Line.find('<PcdsFixedAtBuild>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, True, False, False)
                continue
            if Line.find('<PcdsDynamic>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, True, False)
                continue
            if Line.find('<PcdsDynamicEx>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, True)
                continue
            if Line.endswith('}'):
                #
                # find '}' at line tail
                #
                KeyValues.append([ListItem, LibraryClassItem, \
                                  BuildOption, Pcd])
                (FindBlock, FindLibraryClass, FindBuildOption, \
                FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
                FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, False, False)
                LibraryClassItem, BuildOption, Pcd = [], [], []
                continue
        if FindBlock:
            if FindLibraryClass:
                LibraryClassItem.append(Line)
            elif FindBuildOption:
                BuildOption.append(Line)
            elif FindPcdsFeatureFlag:
                Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG_NULL, Line))
            elif FindPcdsPatchableInModule:
                Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE_NULL, Line))
            elif FindPcdsFixedAtBuild:
                Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD_NULL, Line))
            elif FindPcdsDynamic:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_DEFAULT_NULL, Line))
            elif FindPcdsDynamicEx:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, Line))
        else:
            # A component line without a '{' block has no sub-sections.
            KeyValues.append([ListItem, [], [], []])
    return True
## GetExec
#
# Parse a string with format "InfFilename [EXEC = ExecFilename]"
# Return (InfFilename, ExecFilename)
#
# @param String: String with EXEC statement
#
def GetExec(String):
    """Parse 'InfFilename [EXEC = ExecFilename]' into (InfFilename, ExecFilename).

    When no 'EXEC' marker is present, ExecFilename is the empty string.
    """
    Pos = String.find('EXEC')
    if Pos > -1:
        # Everything before the marker is the INF name; everything after
        # (including any '=' the author wrote) is the exec value.
        return (String[:Pos].strip(), String[Pos + len('EXEC'):].strip())
    return (String.strip(), '')
## GetComponents
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3],
# [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get component successfully
#
def GetComponents(Lines, KeyValues, CommentCharacter):
    """Parse the components block defined in a DSC file.

    Appends one entry per component to KeyValues, shaped as
    [ComponentName, [LibraryClass, ...], [BuildOption, ...],
    [(PcdType, PcdLine), ...]].  A component is either a single line or
    a '{ ... }' block containing <LibraryClasses>, <BuildOptions> and
    <Pcds*> sub-sections.

    @param Lines:            The content to be parsed
    @param KeyValues:        To store data after parsing
    @param CommentCharacter: Comment char, used to ignore comment content

    @retval True Get component successfully
    """
    if Lines.find(DataType.TAB_SECTION_END) > -1:
        Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    #
    # State flags: FindBlock is True while inside a '{ ... }' component
    # block; at most one of the remaining flags is True, tracking which
    # <...> sub-section the parser is currently in.
    #
    (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
    FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
    FindPcdsDynamicEx) = \
    (False, False, False, False, False, False, False, False)
    ListItem = None
    LibraryClassItem = []
    BuildOption = []
    Pcd = []
    LineList = Lines.split('\n')
    for Line in LineList:
        Line = CleanString(Line, CommentCharacter)
        # PEP 8: compare to None with 'is', booleans via truthiness
        # (was 'Line == None' and 'FindBlock == False').
        if Line is None or Line == '':
            continue
        if not FindBlock:
            ListItem = Line
            #
            # find '{' at line tail
            #
            if Line.endswith('{'):
                FindBlock = True
                ListItem = CleanString(Line.rsplit('{', 1)[0], CommentCharacter)
        #
        # Parse a block content
        #
        if FindBlock:
            if Line.find('<LibraryClasses>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (True, False, False, False, False, False, False)
                continue
            if Line.find('<BuildOptions>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, True, False, False, False, False, False)
                continue
            if Line.find('<PcdsFeatureFlag>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, True, False, False, False, False)
                continue
            if Line.find('<PcdsPatchableInModule>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, True, False, False, False)
                continue
            if Line.find('<PcdsFixedAtBuild>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, True, False, False)
                continue
            if Line.find('<PcdsDynamic>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, True, False)
                continue
            if Line.find('<PcdsDynamicEx>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, True)
                continue
            if Line.endswith('}'):
                #
                # find '}' at line tail: close the block and flush the
                # data collected for this component.
                #
                KeyValues.append([ListItem, LibraryClassItem, BuildOption, \
                Pcd])
                (FindBlock, FindLibraryClass, FindBuildOption, \
                FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
                FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, False, False)
                LibraryClassItem, BuildOption, Pcd = [], [], []
                continue
        if FindBlock:
            if FindLibraryClass:
                LibraryClassItem.append(Line)
            elif FindBuildOption:
                BuildOption.append(Line)
            elif FindPcdsFeatureFlag:
                Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG, Line))
            elif FindPcdsPatchableInModule:
                Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE, Line))
            elif FindPcdsFixedAtBuild:
                Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD, Line))
            elif FindPcdsDynamic:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC, Line))
            elif FindPcdsDynamicEx:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX, Line))
        else:
            # A component without a '{ ... }' block has no sub-section data.
            KeyValues.append([ListItem, [], [], []])
    return True
## Get Source
#
# Get Source of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
#
# @param Item: String as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
# @param ContainerFile: The file which describes the library class, used
# for error report
#
def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1):
    """Parse a [Sources] entry of an INF file.

    Item has the form
    <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]].
    Returns the five fields as a tuple; missing trailing fields are
    empty strings.  Raises a parser error (via RaiseParserError) on a
    malformed entry or a missing source file.
    """
    # Pad with separators so the split always yields at least five fields.
    ItemNew = Item + DataType.TAB_VALUE_SPLIT * 4
    List = GetSplitValueList(ItemNew)
    if len(List) < 5 or len(List) > 9:
        # Build the expected-format message with implicit concatenation;
        # the old literal embedded the continuation line's indentation
        # whitespace inside the message text.
        RaiseParserError(Item, 'Sources', ContainerFile,
                         '<Filename>[|<Family>[|<TagName>[|<ToolCode>'
                         '[|<PcdFeatureFlag>]]]]', LineNo)
    List[0] = NormPath(List[0])
    CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Sources', \
                   Item, LineNo)
    if List[4] != '':
        # A fifth field must be a valid <TokenSpaceGuidCName>.<PcdCName>.
        CheckPcdTokenInfo(List[4], 'Sources', ContainerFile, LineNo)
    return (List[0], List[1], List[2], List[3], List[4])
## Get Binary
#
# Get Binary of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
#
# @param Item: String as <Filename>[|<Family>[|<TagName>
# [|<ToolCode>[|<PcdFeatureFlag>]]]]
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetBinary(Item, ContainerFile, LineNo= -1):
    """Parse a [Binaries] entry of an INF file.

    Item has the form
    <FileType>|<Filename>[|<Target>[|<TokenSpaceGuidCName>.<PcdCName>]].
    Returns a 4-tuple (FileType, Filename, Target, PcdFeatureFlag); the
    flag is '' when absent.  Raises a parser error on a malformed entry.
    """
    # Pad with one separator so a minimal entry still splits cleanly.
    ItemNew = Item + DataType.TAB_VALUE_SPLIT
    List = GetSplitValueList(ItemNew)
    if len(List) < 3 or len(List) > 5:
        # Build the expected-format message with implicit concatenation;
        # the old literal embedded the continuation line's indentation
        # whitespace inside the message text.
        RaiseParserError(Item, 'Binaries', ContainerFile,
                         '<FileType>|<Filename>[|<Target>'
                         '[|<TokenSpaceGuidCName>.<PcdCName>]]', LineNo)
    if len(List) >= 4:
        if List[3] != '':
            CheckPcdTokenInfo(List[3], 'Binaries', ContainerFile, LineNo)
        return (List[0], List[1], List[2], List[3])
    elif len(List) == 3:
        return (List[0], List[1], List[2], '')
## Get Guids/Protocols/Ppis
#
# Get Guids/Protocols/Ppis of Inf as <GuidCName>[|<PcdFeatureFlag>]
#
# @param Item: String as <GuidCName>[|<PcdFeatureFlag>]
#
# @return (GuidCName, PcdFeatureFlag); the flag is '' when not given
#
def GetGuidsProtocolsPpisOfInf(Item):
    """Split an INF <GuidCName>[|<PcdFeatureFlag>] entry.

    Returns (GuidCName, PcdFeatureFlag); the flag is '' when not given.
    """
    # Append one separator so a missing feature flag still yields a
    # second (empty) field.
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    return (Fields[0], Fields[1])
## Get Guids/Protocols/Ppis
#
# Get Guids/Protocols/Ppis of Dec as <GuidCName>=<GuidValue>
#
# @param Item: String as <GuidCName>=<GuidValue>
# @param Type: Type of parsing string
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1):
    """Split a DEC <CName>=<GuidValue> entry.

    The GUID value may be written in C format ('{...}') or in registry
    format; it is always returned in registry format.
    Returns (CName, RegisterFormatGuid).
    """
    TokenList = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT)
    if len(TokenList) != 2:
        RaiseParserError(Item, Type, ContainerFile, '<CName>=<GuidValue>', \
                         LineNo)
    GuidValue = TokenList[1]
    #
    # Convert a C-format GUID ('{...}') to registry format; a value that
    # is already in registry format is accepted as-is.
    #
    if GuidValue[0] == '{' and GuidValue[-1] == '}':
        RegisterFormatGuid = GuidStructureStringToGuidString(GuidValue)
        if RegisterFormatGuid == '':
            RaiseParserError(Item, Type, ContainerFile, \
                             'CFormat or RegisterFormat', LineNo)
    elif CheckGuidRegFormat(GuidValue):
        RegisterFormatGuid = GuidValue
    else:
        RaiseParserError(Item, Type, ContainerFile, \
                         'CFormat or RegisterFormat', LineNo)
    return (TokenList[0], RegisterFormatGuid)
## GetPackage
#
# Get Package of Inf as <PackagePath>[|<PcdFeatureFlag>]
#
# @param Item: String as <PackagePath>[|<PcdFeatureFlag>]
# @param ContainerFile: The file which describes the package,
#                       used for error report
# @param FileRelativePath: Base path used to check the package file exists
# @param LineNo: Line number in ContainerFile, used for error report
#
def GetPackage(Item, ContainerFile, FileRelativePath, LineNo= -1):
    """Split an INF <PackagePath>[|<PcdFeatureFlag>] entry.

    Validates that the package path names an existing .dec file and that
    any feature flag is a valid <TokenSpaceGuidCName>.<PcdCName>.
    Returns (PackagePath, PcdFeatureFlag); the flag is '' when not given.
    """
    # Pad with one separator so the flag field always exists.
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    PackagePath = Fields[0]
    FeatureFlag = Fields[1]
    CheckFileType(PackagePath, '.Dec', ContainerFile, 'package',
                  PackagePath, LineNo)
    CheckFileExist(FileRelativePath, PackagePath, ContainerFile, 'Packages',
                   PackagePath, LineNo)
    if FeatureFlag != '':
        CheckPcdTokenInfo(FeatureFlag, 'Packages', ContainerFile, LineNo)
    return (PackagePath, FeatureFlag)
## Get Pcd Values of Inf
#
# Get Pcd of Inf as <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
#
# @param Item: The string describes pcd
# @param Type: The type of Pcd
# @param File: The file which describes the pcd, used for error report
#
def GetPcdOfInf(Item, Type, File, LineNo):
    """Parse an INF Pcd entry <TokenSpaceGuidCName>.<PcdCName>[|<Value>].

    Type selects which Pcds* section the entry belongs to; it is mapped
    onto the matching INF section keyword.  Returns
    (TokenGuid, TokenName, Value, InfType) where Value is None when the
    optional |<Value> part is absent.
    """
    Format = '<TokenSpaceGuidCName>.<PcdCName>[|<Value>]'
    # Map the generic Pcd type onto the INF-specific section keyword;
    # unknown types keep an empty keyword, as before.
    TypeToInfType = {
        DataType.TAB_PCDS_FIXED_AT_BUILD: DataType.TAB_INF_FIXED_PCD,
        DataType.TAB_PCDS_PATCHABLE_IN_MODULE: DataType.TAB_INF_PATCH_PCD,
        DataType.TAB_PCDS_FEATURE_FLAG: DataType.TAB_INF_FEATURE_PCD,
        DataType.TAB_PCDS_DYNAMIC_EX: DataType.TAB_INF_PCD_EX,
        DataType.TAB_PCDS_DYNAMIC: DataType.TAB_INF_PCD,
    }
    InfType = TypeToInfType.get(Type, '')
    TokenGuid, TokenName = '', ''
    Fields = GetSplitValueList(Item, DataType.TAB_VALUE_SPLIT, 1)
    TokenInfo = GetSplitValueList(Fields[0], DataType.TAB_SPLIT)
    if len(TokenInfo) != 2:
        RaiseParserError(Item, InfType, File, Format, LineNo)
    else:
        TokenGuid = TokenInfo[0]
        TokenName = TokenInfo[1]
    Value = Fields[1] if len(Fields) > 1 else None
    return (TokenGuid, TokenName, Value, InfType)
## Get Pcd Values of Dec
#
# Get Pcd of Dec as <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
# @param Item: Pcd item
# @param Type: Pcd type
# @param File: Dec file
# @param LineNo: Line number
#
def GetPcdOfDec(Item, Type, File, LineNo= -1):
    """Parse a DEC Pcd entry.

    Item has the form
    <TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>.
    Returns (TokenGuid, TokenName, Value, DatumType, Token, Type);
    fields that cannot be parsed stay as empty strings after the parser
    error is reported.
    """
    Format = '<TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>'
    TokenGuid, TokenName, Value, DatumType, Token = '', '', '', '', ''
    Fields = GetSplitValueList(Item)
    if len(Fields) != 4:
        RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
    else:
        Value, DatumType, Token = Fields[1], Fields[2], Fields[3]
        TokenInfo = GetSplitValueList(Fields[0], DataType.TAB_SPLIT)
        if len(TokenInfo) != 2:
            RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
        else:
            TokenGuid, TokenName = TokenInfo
    return (TokenGuid, TokenName, Value, DatumType, Token, Type)
## Parse DEFINE statement
#
# Get DEFINE macros
#
# @param LineValue: A DEFINE line value
# @param StartLine: A DEFINE start line
# @param Table: A table
# @param FileID: File ID
# @param Filename: File name
# @param SectionName: DEFINE section name
# @param SectionModel: DEFINE section model
# @param Arch: DEFINE arch
#
def ParseDefine(LineValue, StartLine, Table, FileID, SectionName, \
                SectionModel, Arch):
    """Parse a DEFINE statement and record the macro in *Table*.

    @param LineValue:    The DEFINE line text
    @param StartLine:    Line number the statement starts on
    @param Table:        Target table (Insert is called on it)
    @param FileID:       File ID for the table record
    @param SectionName:  Name of the section containing the DEFINE
    @param SectionModel: Model of the section containing the DEFINE
    @param Arch:         Arch the DEFINE applies to
    """
    Logger.Debug(Logger.DEBUG_2,
                 ST.MSG_DEFINE_STATEMENT_FOUND % (LineValue, SectionName))
    # Locate the (case-insensitive) 'DEFINE ' keyword and split what
    # follows it into a (name, value) pair at the first '='.
    Keyword = DataType.TAB_DEFINE.upper() + ' '
    Begin = LineValue.upper().find(Keyword) + len(Keyword)
    Define = GetSplitValueList(CleanString(LineValue[Begin:]),
                               DataType.TAB_EQUAL_SPLIT, 1)
    Table.Insert(DataType.MODEL_META_DATA_DEFINE, Define[0], Define[1], '',
                 '', '', Arch, SectionModel, FileID, StartLine, -1,
                 StartLine, -1, 0)
## InsertSectionItems
#
# Insert item data of a section to a dict
#
# @param Model: A model
# @param CurrentSection: Current section
# @param SectionItemList: Section item list
# @param ArchList: Arch list
# @param ThirdList: Third list
# @param RecordSet: Record set
#
def InsertSectionItems(Model, SectionItemList, ArchList, \
                       ThirdList, RecordSet):
    """Append the item data of one section to RecordSet[Model].

    ArchList and ThirdList run in parallel: entry *i* of each applies to
    the whole SectionItemList.  DEFINE statements are skipped here, as
    they are handled by ParseDefine.
    """
    #
    # Insert each item data of a section, once per (arch, third) pair.
    #
    for Index in range(0, len(ArchList)):
        Arch = ArchList[Index]
        Third = ThirdList[Index]
        # An empty arch string means the item applies to all archs.
        if Arch == '':
            Arch = DataType.TAB_ARCH_COMMON
        Records = RecordSet[Model]
        for Item in SectionItemList:
            LineValue = Item[0]
            StartLine = Item[1]
            Comment = Item[2]
            Logger.Debug(4, ST.MSG_PARSING % LineValue)
            #
            # DEFINE statements are parsed elsewhere; skip them here.
            #
            if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
                continue
            #
            # All other statements become plain records (id -1).
            #
            Records.append([LineValue, Arch, StartLine, -1, Third, Comment])
    if RecordSet != {}:
        RecordSet[Model] = Records
## GenMetaDatSectionItem
#
# @param Key: A key
# @param Value: A value
# @param List: A list
#
def GenMetaDatSectionItem(Key, Value, List):
    """Append Value to the list stored under Key in the dict *List*.

    Despite its name, *List* is a dict mapping each key to a list of
    values; the per-key list is created on first use.
    """
    # setdefault creates the per-key list on demand in a single lookup,
    # replacing the previous 'in'-check-plus-assignment branch.
    List.setdefault(Key, []).append(Value)
## GetPkgInfoFromDec
#
# get package name, guid, version info from dec files
#
# @param Path: File path
#
def GetPkgInfoFromDec(Path):
    """Return (PkgName, PkgGuid, PkgVersion) read from a DEC file.

    Results are memoized in gPKG_INFO_DICT, and parsed Dec objects are
    shared through GlobalData.gPackageDict.  Returns (None, None, None)
    when parsing fails with a FatalError.
    """
    Path = Path.replace('\\', '/')
    if not os.path.exists(Path):
        Logger.Error("\nUPT", FILE_NOT_FOUND, File=Path)
    if Path in gPKG_INFO_DICT:
        return gPKG_INFO_DICT[Path]
    try:
        # Reuse a previously parsed Dec object when one is available.
        if Path in GlobalData.gPackageDict:
            DecParser = GlobalData.gPackageDict[Path]
        else:
            DecParser = Dec(Path)
            GlobalData.gPackageDict[Path] = DecParser
        PkgInfo = (DecParser.GetPackageName(),
                   DecParser.GetPackageGuid(),
                   DecParser.GetPackageVersion())
        gPKG_INFO_DICT[Path] = PkgInfo
        return PkgInfo
    except FatalError:
        return None, None, None
## GetWorkspacePackage
#
# Get a list of workspace package information.
#
def GetWorkspacePackage():
    """Return a list of workspace package information.

    Walks GlobalData.gWORKSPACE for DEC files and returns
    [(PkgName, PkgGuid, PkgVersion, DecFilePath), ...] for every DEC
    file whose name, guid and version could all be read.
    """
    DecFileList = []
    WorkspaceDir = GlobalData.gWORKSPACE
    for Root, Dirs, Files in os.walk(WorkspaceDir):
        # Prune version-control and hidden directories in place so that
        # os.walk does not descend into them.  The previous code removed
        # entries from Dirs while iterating over it, which silently
        # skipped the element following each removed one; rebuilding the
        # list with a slice assignment fixes that.  ('.svn' is covered
        # by the leading-dot test.)
        Dirs[:] = [Dir for Dir in Dirs
                   if Dir != 'CVS' and not Dir.startswith('.')]
        for FileSp in Files:
            if FileSp.startswith('.'):
                continue
            Ext = os.path.splitext(FileSp)[1]
            if Ext.lower() == '.dec':
                DecFileList.append(
                    os.path.normpath(os.path.join(Root, FileSp)))
    #
    # abstract package guid, version info from DecFile List
    #
    PkgList = []
    for DecFile in DecFileList:
        (PkgName, PkgGuid, PkgVersion) = GetPkgInfoFromDec(DecFile)
        if PkgName and PkgGuid and PkgVersion:
            PkgList.append((PkgName, PkgGuid, PkgVersion, DecFile))
    return PkgList
## GetWorkspaceModule
#
# Get a list of workspace modules.
#
def GetWorkspaceModule():
    """Return a list of workspace module (INF) file paths.

    Walks GlobalData.gWORKSPACE, skipping version-control, hidden and
    'Build' directories, and collects every *.inf file found.
    """
    InfFileList = []
    WorkspaceDir = GlobalData.gWORKSPACE
    for Root, Dirs, Files in os.walk(WorkspaceDir):
        # Prune unwanted directories in place.  Rebuilding Dirs with a
        # slice assignment fixes the previous remove-while-iterating bug,
        # which skipped the entry following each removed hidden
        # directory.  ('.svn' is covered by the leading-dot test.)
        Dirs[:] = [Dir for Dir in Dirs
                   if Dir not in ('CVS', 'Build') and not Dir.startswith('.')]
        for FileSp in Files:
            if FileSp.startswith('.'):
                continue
            Ext = os.path.splitext(FileSp)[1]
            if Ext.lower() == '.inf':
                InfFileList.append(
                    os.path.normpath(os.path.join(Root, FileSp)))
    return InfFileList
## MacroParser used to parse macro definition
#
# @param Line: The content contain linestring and line number
# @param FileName: The meta-file file name
# @param SectionType: Section for the Line belong to
# @param FileLocalMacros: A list contain Macro defined in [Defines] section.
#
def MacroParser(Line, FileName, SectionType, FileLocalMacros):
    """Parse a 'DEFINE <MACRO> = <Value>' macro definition line.

    @param Line:            Tuple of (line string, line number)
    @param FileName:        The meta-file name, used for error report
    @param SectionType:     Section model type the line belongs to
    @param FileLocalMacros: Dict of macros defined in the [Defines]
                            section; updated in place for header-section
                            DEFINEs

    @return (Name, Value) of the macro, or (None, None) when the line is
            not a DEFINE statement.  Reports a parser error on malformed
            names or values.
    """
    MacroDefPattern = re.compile("^(DEFINE)[ \t]+")
    LineContent = Line[0]
    LineNo = Line[1]
    Match = MacroDefPattern.match(LineContent)
    if not Match:
        #
        # Not 'DEFINE/EDK_GLOBAL' statement, call decorated method
        #
        return None, None
    TokenList = GetSplitValueList(LineContent[Match.end(1):], \
                                  DataType.TAB_EQUAL_SPLIT, 1)
    #
    # Syntax check
    #
    if not TokenList[0]:
        Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACRONAME_NOGIVEN,
                     ExtraData=LineContent, File=FileName, Line=LineNo)
    if len(TokenList) < 2:
        Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACROVALUE_NOGIVEN,
                     ExtraData=LineContent, File=FileName, Line=LineNo)
    Name, Value = TokenList
    #
    # DEFINE defined macros
    #
    if SectionType == DataType.MODEL_META_DATA_HEADER:
        FileLocalMacros[Name] = Value
    ReIsValidMacroName = re.compile(r"^[A-Z][A-Z0-9_]*$", re.DOTALL)
    # PEP 8: test a match object against None with 'is', not '=='.
    if ReIsValidMacroName.match(Name) is None:
        Logger.Error('Parser',
                     FORMAT_INVALID,
                     ST.ERR_MACRONAME_INVALID % (Name),
                     ExtraData=LineContent,
                     File=FileName,
                     Line=LineNo)
    # Validate MACRO Value
    #
    #  <MacroDefinition> ::=  [<Comments>]{0,}
    #                       "DEFINE" <MACRO> "=" [{<PATH>} {<VALUE>}] <EOL>
    #  <Value>           ::=  {<NumVal>} {<Boolean>} {<AsciiString>} {<GUID>}
    #                         {<CString>} {<UnicodeString>} {<CArray>}
    #
    # The definition of <NumVal>, <PATH>, <Boolean>, <GUID>, <CString>,
    # <UnicodeString>, <CArray> are subset of <AsciiString>.
    #
    ReIsValidMacroValue = re.compile(r"^[\x20-\x7e]*$", re.DOTALL)
    if ReIsValidMacroValue.match(Value) is None:
        Logger.Error('Parser',
                     FORMAT_INVALID,
                     ST.ERR_MACROVALUE_INVALID % (Value),
                     ExtraData=LineContent,
                     File=FileName,
                     Line=LineNo)
    return Name, Value
## GenSection
#
# generate section contents
#
# @param SectionName: indicate the name of the section, details refer to
# INF, DEC specs
# @param SectionDict: section statement dict, key is SectionAttrs(arch,
# moduletype or platform may exist as needed) list
# seperated by space,
# value is statement
#
def GenSection(SectionName, SectionDict, SplitArch=True, NeedBlankLine=False):
    """Generate the text of one INF/DEC section.

    SectionDict maps a section-attribute string (arch list, possibly
    with module type or platform) to its statement list; each key
    produces a '[SectionName.Arch, ...]' header followed by the
    statements.  Returns the generated text, or '' when nothing
    substantive was produced.
    """
    Content = ''
    for SectionAttrs in SectionDict:
        StatementList = SectionDict[SectionAttrs]
        # Attribute-qualified header, except for Defines and 'common' arch.
        if SectionAttrs and SectionName != 'Defines' and SectionAttrs.strip().upper() != DataType.TAB_ARCH_COMMON:
            if SplitArch:
                ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_SPACE_SPLIT)
            else:
                if SectionName != 'UserExtensions':
                    ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_COMMENT_SPLIT)
                else:
                    ArchList = [SectionAttrs]
            # NOTE(review): xrange requires Python 2 (or a compatibility shim).
            for Index in xrange(0, len(ArchList)):
                ArchList[Index] = ConvertArchForInstall(ArchList[Index])
            Section = '[' + SectionName + '.' + (', ' + SectionName + '.').join(ArchList) + ']'
        else:
            Section = '[' + SectionName + ']'
        Content += '\n' + Section + '\n'
        if StatementList != None:
            for Statement in StatementList:
                LineList = Statement.split('\n')
                NewStatement = ""
                for Line in LineList:
                    # ignore blank comment
                    if not Line.replace("#", '').strip() and SectionName not in ('Defines', 'Hob', 'Event', 'BootMode'):
                        continue
                    # add two space before non-comments line except the comments in Defines section
                    if Line.strip().startswith('#') and SectionName == 'Defines':
                        NewStatement += "%s\n" % Line
                        continue
                    NewStatement += " %s\n" % Line
                if NeedBlankLine:
                    Content += NewStatement + '\n'
                else:
                    Content += NewStatement
    if NeedBlankLine:
        # Drop the trailing blank line added after the last statement.
        Content = Content[:-1]
    # NOTE(review): this strips literal backslash-n sequences, not real
    # newlines -- presumably intentional for escaped statements; confirm.
    if not Content.replace('\\n', '').strip():
        return ''
    return Content
## ConvertArchForInstall
# if Arch.upper() is in "IA32", "X64", "IPF", and "EBC", it must be upper case. "common" must be lower case.
# Anything else, the case must be preserved
#
# @param Arch: the arch string that need to be converted, it should be stripped before pass in
# @return: the arch string that get converted
#
def ConvertArchForInstall(Arch):
    """Normalize an arch string for install.

    IA32/X64/IPF/EBC are forced to upper case and 'common' to lower
    case; any other value is returned with its case preserved.  The
    input is expected to be stripped already.
    """
    Upper = Arch.upper()
    if Upper in (DataType.TAB_ARCH_IA32, DataType.TAB_ARCH_X64,
                 DataType.TAB_ARCH_IPF, DataType.TAB_ARCH_EBC):
        return Upper
    if Upper == DataType.TAB_ARCH_COMMON:
        return Arch.lower()
    return Arch
| 39.103042 | 121 | 0.580937 |
ace3e156d72b6f412075bb9befa64451f7d3d7a0 | 1,036 | py | Python | tests/test_colorautoadjust.py | huideyeren/anshitsu | d584759cd50717131f9753627621d9a6aa425f67 | [
"MIT"
] | null | null | null | tests/test_colorautoadjust.py | huideyeren/anshitsu | d584759cd50717131f9753627621d9a6aa425f67 | [
"MIT"
] | null | null | null | tests/test_colorautoadjust.py | huideyeren/anshitsu | d584759cd50717131f9753627621d9a6aa425f67 | [
"MIT"
] | null | null | null | import os
from anshitsu import retouch
from PIL import Image
def test_colorautoadjust_by_rgb():
image = Image.open(
os.path.join(".", "tests", "pic", "dog.jpg"),
)
rt = retouch.Retouch(image=image, colorautoadjust=True)
gray = rt.process()
assert gray.mode == "RGB"
def test_colorautoadjust_by_grayscale():
image = Image.open(
os.path.join(".", "tests", "pic", "tokyo_station.jpg"),
)
rt = retouch.Retouch(image=image, colorautoadjust=True)
gray = rt.process()
assert gray.mode == "L"
def test_colorautoadjust_by_rgba():
image = Image.open(
os.path.join(".", "tests", "pic", "nullpo.png"),
)
rt = retouch.Retouch(image=image, colorautoadjust=True)
gray = rt.process()
assert gray.mode == "RGB"
def test_colorautoadjust_by_grayscale_with_alpha():
image = Image.open(
os.path.join(".", "tests", "pic", "test.png"),
)
rt = retouch.Retouch(image=image, colorautoadjust=True)
gray = rt.process()
assert gray.mode == "L"
| 25.268293 | 63 | 0.63417 |
ace3e3f23d3b2634bebc98d0bff76316e50cdfe9 | 16,469 | py | Python | lib/datasets/pascal_voc.py | Flsahkong/seeDiffDA | 8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42 | [
"MIT"
] | 62 | 2018-10-27T02:44:46.000Z | 2022-03-09T12:58:52.000Z | lib/datasets/pascal_voc.py | Flsahkong/seeDiffDA | 8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42 | [
"MIT"
] | 12 | 2018-11-16T11:22:40.000Z | 2020-06-07T06:08:10.000Z | lib/datasets/pascal_voc.py | Flsahkong/seeDiffDA | 8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42 | [
"MIT"
] | 24 | 2018-10-27T02:44:29.000Z | 2021-07-12T08:49:17.000Z | from __future__ import print_function
from __future__ import absolute_import
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
# import PIL
import numpy as np
import scipy.sparse
import subprocess
import math
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
import pickle
from .imdb import imdb
from .imdb import ROOT_DIR
from . import ds_utils
from .voc_eval import voc_eval
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from model.utils.config import cfg
# Provide xrange on Python 3, where it was removed in favor of range.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3
# <<<< obsolete
class pascal_voc(imdb):
    """PASCAL-VOC-style detection dataset wrapper.

    Loads image indexes and ground-truth ROIs from VOCdevkit-layout XML
    annotations, and writes/evaluates detection results in the standard
    VOC results format.  This variant is configured for a single
    foreground class ('tiger'); alternative class lists are kept
    (disabled) in the string literal inside __init__.
    """
    def __init__(self, image_set, year, devkit_path=None):
        imdb.__init__(self, 'voc_' + year + '_' + image_set)
        self._year = year
        self._image_set = image_set
        self._devkit_path = self._get_default_path() if devkit_path is None \
            else devkit_path
        self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
        """
        self._classes = ('__background__', # always index 0
                         'aeroplane', 'bicycle', 'bird', 'boat',
                         'bottle', 'bus', 'car', 'cat', 'chair',
                         'cow', 'diningtable', 'dog', 'horse',
                         'motorbike', 'person', 'pottedplant',
                         'sheep', 'sofa', 'train', 'tvmonitor')
        self._classes = ('__background__', # always index 0
                         'aeroplane', 'bicycle', 'bird', 'boat',
                         'bottle', 'bus', 'car', 'cat', 'chair',
                         'cow', 'diningtable', 'dog', 'horse',
                         'motorbike', 'person', 'pottedplant',
                         'sheep', 'sofa', 'train', 'tvmonitor',
                         'rider', 'motorcycle', 'truck')
        self._classes = ('__background__', 'person', 'rider',
                         'car', 'truck', 'bus', 'train',
                         'motorcycle', 'bicycle')
        self._classes = ('__background__', 'bobcat', 'opossum',
                         'coyote', 'raccoon', 'bird', 'dog',
                         'cat', 'squirrel', 'rabbit', 'skunk',
                         'rodent', 'badger', 'deer', 'car', 'fox')
        self._classes = ('__background__', 'leopard', 'sambar',
                         'tiger', 'chital')
        """
        self._classes = ('__background__', 'tiger')
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        print(self.classes)
        self._image_ext = '.jpg'
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        # self._roidb_handler = self.selective_search_roidb
        self._roidb_handler = self.gt_roidb
        # Random salt keeps parallel evaluations from clobbering each
        # other's results files (see competition_mode / _get_comp_id).
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'
        # PASCAL specific config options
        self.config = {'cleanup': True,
                       'use_salt': True,
                       'use_diff': False,
                       'matlab_eval': False,
                       'rpn_file': None,
                       'min_size': 2}
        assert os.path.exists(self._devkit_path), \
            'VOCdevkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
            'Path does not exist: {}'.format(self._data_path)
    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self._image_index[i])
    def image_id_at(self, i):
        """
        Return the id of image i in the image sequence (its index).
        """
        return i
    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        image_path = os.path.join(self._data_path, 'JPEGImages',
                                  index + self._image_ext)
        assert os.path.exists(image_path), \
            'Path does not exist: {}'.format(image_path)
        return image_path
    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        # Example path to image set file:
        # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
        image_set_file = os.path.join(self._data_path, 'ImageSets',
                                      self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        print('sample image from dataset:', image_index[0])
        return image_index
    def _get_default_path(self):
        """
        Return the default path where PASCAL VOC is expected to be installed.
        """
        return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        """
        @Blocking caching due to various reasons...
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        """
        # Caching is disabled above; annotations are re-parsed each call.
        gt_roidb = [self._load_pascal_annotation(index)
                    for index in self.image_index]
        """
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        """
        return gt_roidb
    def selective_search_roidb(self):
        """
        Return the database of selective search regions of interest.
        Ground-truth ROIs are also included.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        """
        cache_file = os.path.join(self.cache_path,
                                  self.name + '_selective_search_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        """
        if int(self._year) == 2007 or self._image_set != 'test':
            gt_roidb = self.gt_roidb()
            ss_roidb = self._load_selective_search_roidb(gt_roidb)
            roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = self._load_selective_search_roidb(None)
        """
        with open(cache_file, 'wb') as fid:
            pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote ss roidb to {}'.format(cache_file))
        """
        return roidb
    def rpn_roidb(self):
        # Merge ground truth with externally supplied RPN proposals,
        # except for the (non-2007) test split which has no ground truth.
        if int(self._year) == 2007 or self._image_set != 'test':
            gt_roidb = self.gt_roidb()
            rpn_roidb = self._load_rpn_roidb(gt_roidb)
            roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
        else:
            roidb = self._load_rpn_roidb(None)
        return roidb
    def _load_rpn_roidb(self, gt_roidb):
        # Load pickled RPN proposal boxes from the path in self.config.
        filename = self.config['rpn_file']
        print('loading {}'.format(filename))
        assert os.path.exists(filename), \
            'rpn data not found at: {}'.format(filename)
        with open(filename, 'rb') as f:
            box_list = pickle.load(f)
        return self.create_roidb_from_box_list(box_list, gt_roidb)
    def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()
        box_list = []
        for i in xrange(raw_data.shape[0]):
            # MATLAB boxes are (y1, x1, y2, x2) and 1-based; reorder to
            # (x1, y1, x2, y2) and shift to 0-based indexing.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)
        return self.create_roidb_from_box_list(box_list, gt_roidb)
    def _load_pascal_annotation(self, index):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC
        format.
        """
        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        # Difficult-object filtering is disabled:
        # if not self.config['use_diff']:
        #     # Exclude the samples labeled as difficult
        #     non_diff_objs = [
        #         obj for obj in objs if int(obj.find('difficult').text) == 0]
        #     # if len(non_diff_objs) != len(objs):
        #     #     print 'Removed {} difficult objects'.format(
        #     #         len(objs) - len(non_diff_objs))
        #     objs = non_diff_objs
        num_objs = len(objs)
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # "Seg" area for pascal is just the box area
        seg_areas = np.zeros((num_objs), dtype=np.float32)
        ishards = np.zeros((num_objs), dtype=np.int32)
        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            bbox = obj.find('bndbox')
            # Make pixel indexes 0-based
            """
            @Edit x1, y1 distances maybe flipping.
            """
            x1 = float(bbox.find('xmin').text) - 1
            y1 = float(bbox.find('ymin').text) - 1
            #x1 = float(bbox.find('xmin').text)
            #y1 = float(bbox.find('ymin').text)
            x2 = float(bbox.find('xmax').text) - 1
            y2 = float(bbox.find('ymax').text) - 1
            #x2 = float(bbox.find('xmax').text)
            #y2 = float(bbox.find('ymax').text)
            # Missing <difficult> tag counts as not difficult.
            # (== None comparison kept as-is; 'is None' would be idiomatic.)
            diffc = obj.find('difficult')
            difficult = 0 if diffc == None else int(diffc.text)
            ishards[ix] = difficult
            cls = self._class_to_ind[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
        overlaps = scipy.sparse.csr_matrix(overlaps)
        return {'boxes': boxes,
                'gt_classes': gt_classes,
                'gt_ishard': ishards,
                'gt_overlaps': overlaps,
                'flipped': False,
                'seg_areas': seg_areas}
    def _get_comp_id(self):
        comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
                   else self._comp_id)
        return comp_id
    def _get_voc_results_file_template(self):
        # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
        filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
        filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year)
        if not os.path.exists(filedir):
            os.makedirs(filedir)
        path = os.path.join(filedir, filename)
        return path
    def _write_voc_results_file(self, all_boxes):
        # One results file per foreground class, each line:
        # <image id> <score> <x1> <y1> <x2> <y2>
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print('Writing {} VOC results file'.format(cls))
            filename = self._get_voc_results_file_template().format(cls)
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    if dets == []:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in xrange(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(index, dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))
    def _do_python_eval(self, output_dir='output'):
        annopath = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'ImageSets',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            # NOTE(review): ovthresh=0.60 differs from the usual VOC 0.5
            # IoU threshold -- confirm this is intentional.
            rec, prec, ap = voc_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.60,
                use_07_metric=use_07_metric)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
    def _do_matlab_eval(self, output_dir='output'):
        print('-----------------------------------------------------')
        print('Computing results with the official MATLAB eval code.')
        print('-----------------------------------------------------')
        path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                            'VOCdevkit-matlab-wrapper')
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
            .format(self._devkit_path, self._get_comp_id(),
                    self._image_set, output_dir)
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)
    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            # Remove the per-class results files after evaluation.
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)
    def competition_mode(self, on):
        # Competition mode writes un-salted results and keeps the files.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True
if __name__ == '__main__':
    # Quick manual smoke test: build the imdb, load its roidb, and drop
    # into an interactive IPython shell for inspection.
    d = pascal_voc('trainval', '2007')
    res = d.roidb
    from IPython import embed;
    embed()
| 39.588942 | 80 | 0.535855 |
ace3e47bdba86b496501f665bc8ea99a96a97ef9 | 9,962 | py | Python | model_zoo/official/cv/maskrcnn/src/util.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | 1 | 2021-07-03T06:52:20.000Z | 2021-07-03T06:52:20.000Z | model_zoo/official/cv/maskrcnn/src/util.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | model_zoo/official/cv/maskrcnn/src/util.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""coco eval for maskrcnn"""
import json
import numpy as np
import mmcv
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
from .model_utils.config import config
# Template of COCO summary metrics, every entry initialised to the same
# zero scalar; coco_eval() returns this unchanged when there is nothing
# to evaluate.
_init_value = np.array(0.0)
summary_init = {
    metric: _init_value
    for metric in (
        'Precision/mAP',
        'Precision/mAP@.50IOU',
        'Precision/mAP@.75IOU',
        'Precision/mAP (small)',
        'Precision/mAP (medium)',
        'Precision/mAP (large)',
        'Recall/AR@1',
        'Recall/AR@10',
        'Recall/AR@100',
        'Recall/AR@100 (small)',
        'Recall/AR@100 (medium)',
        'Recall/AR@100 (large)',
    )
}
def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000), single_result=False):
    """Run COCOeval over each requested result type.

    result_files : dict mapping result type -> json result file path
    result_types : e.g. ['bbox', 'segm', 'proposal']
    coco         : a COCO object, or a path to an annotation file
    max_dets     : maxDets used for proposal evaluation
    single_result: additionally evaluate each detected image on its own

    Returns a dict of summary metrics; when the bbox result file is
    empty, returns ``summary_init`` (all zeros) instead.

    NOTE(review): when several result types are given, only the stats
    of the LAST evaluated type end up in the returned dict -- confirm
    that is intended by callers.
    """
    anns = json.load(open(result_files['bbox']))
    if not anns:
        return summary_init
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')
        coco_dets = coco.loadRes(result_file)
        gt_img_ids = coco.getImgIds()
        det_img_ids = coco_dets.getImgIds()
        # COCOeval treats proposals as class-agnostic bboxes
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        tgt_ids = gt_img_ids if not single_result else det_img_ids
        if single_result:
            # NOTE(review): res_dict is filled but never returned or
            # otherwise used -- the per-image results are only printed
            # via summarize().
            res_dict = dict()
            for id_i in tgt_ids:
                # fresh evaluator per image so stats do not accumulate
                cocoEval = COCOeval(coco, coco_dets, iou_type)
                if res_type == 'proposal':
                    cocoEval.params.useCats = 0
                    cocoEval.params.maxDets = list(max_dets)
                cocoEval.params.imgIds = [id_i]
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                res_dict.update({coco.imgs[id_i]['file_name']: cocoEval.stats[1]})
        # full evaluation over all target images for this result type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.params.imgIds = tgt_ids
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
    summary_metrics = {
        'Precision/mAP': cocoEval.stats[0],
        'Precision/mAP@.50IOU': cocoEval.stats[1],
        'Precision/mAP@.75IOU': cocoEval.stats[2],
        'Precision/mAP (small)': cocoEval.stats[3],
        'Precision/mAP (medium)': cocoEval.stats[4],
        'Precision/mAP (large)': cocoEval.stats[5],
        'Recall/AR@1': cocoEval.stats[6],
        'Recall/AR@10': cocoEval.stats[7],
        'Recall/AR@100': cocoEval.stats[8],
        'Recall/AR@100 (small)': cocoEval.stats[9],
        'Recall/AR@100 (medium)': cocoEval.stats[10],
        'Recall/AR@100 (large)': cocoEval.stats[11],
    }
    return summary_metrics
def xyxy2xywh(bbox):
    """Convert a corner-format box to COCO's [x, y, w, h] layout.

    ``bbox`` is an array whose first four entries are
    [x1, y1, x2, y2]; any trailing entries (e.g. a score column) are
    ignored.  Width/height are inclusive pixel counts, hence the +1.
    """
    vals = bbox.tolist()
    x1, y1 = vals[0], vals[1]
    return [x1, y1, vals[2] - x1 + 1, vals[3] - y1 + 1]
def bbox2result_1image(bboxes, labels, num_classes):
    """Split one image's detections into per-class arrays.

    Args:
        bboxes (ndarray): shape (n, 5) detection rows.
        labels (ndarray): shape (n,) class label per row.
        num_classes (int): class count including the background class.

    Returns:
        list[ndarray]: one (k, 5) array per foreground class.
    """
    num_fg = num_classes - 1
    if bboxes.shape[0] == 0:
        # no detections: one empty float32 array per foreground class
        return [np.zeros((0, 5), dtype=np.float32) for _ in range(num_fg)]
    return [bboxes[labels == cls_idx, :] for cls_idx in range(num_fg)]
def proposal2json(dataset, results):
    """Convert class-agnostic proposal results to COCO json records."""
    img_ids = dataset.getImgIds()
    json_results = []
    # NOTE(review): iterates twice the dataset size, indexing both
    # img_ids and results with the same idx -- presumably each dataset
    # "row" yields two images here.  Confirm against the caller; the
    # sibling det2json/segm2json use len(img_ids) instead.
    dataset_len = dataset.get_dataset_size()*2
    for idx in range(dataset_len):
        img_id = img_ids[idx]
        bboxes = results[idx]
        for i in range(bboxes.shape[0]):
            data = dict()
            data['image_id'] = img_id
            # rows are [x1, y1, x2, y2, score]
            data['bbox'] = xyxy2xywh(bboxes[i])
            data['score'] = float(bboxes[i][4])
            # proposals are class-agnostic, so a single dummy category id
            data['category_id'] = 1
            json_results.append(data)
    return json_results
def det2json(dataset, results):
    """Convert per-image, per-class detections to COCO json records.

    ``results[idx]`` holds one (n, 5) array per foreground class for
    image ``idx``, each row being [x1, y1, x2, y2, score].  Returns a
    flat list of COCO-style detection dicts.
    """
    cat_ids = dataset.getCatIds()
    json_results = []
    for idx, img_id in enumerate(dataset.getImgIds()):
        # guard against a results list shorter than the image id list
        if idx == len(results):
            break
        for label, bboxes in enumerate(results[idx]):
            for row in bboxes:
                json_results.append({
                    'image_id': img_id,
                    'bbox': xyxy2xywh(row),
                    'score': float(row[4]),
                    'category_id': cat_ids[label],
                })
    return json_results
def segm2json(dataset, results):
    """Convert (det, seg) results into parallel bbox and segm records.

    ``results[idx]`` is a (det, seg) pair for image ``idx``.  ``seg`` is
    either a per-class list of RLE mask dicts, or a 2-tuple of
    (per-class segms, per-class mask scores).

    Returns:
        tuple: (bbox_json_results, segm_json_results).

    NOTE(review): decodes ``segms[i]['counts']`` in place, mutating the
    caller's result structures.
    """
    cat_ids = dataset.getCatIds()
    img_ids = dataset.getImgIds()
    bbox_json_results = []
    segm_json_results = []
    dataset_len = len(img_ids)
    assert dataset_len == len(results)
    for idx in range(dataset_len):
        img_id = img_ids[idx]
        if idx == len(results): break  # unreachable after the assert above
        det, seg = results[idx]
        for label, det_label in enumerate(det):
            bboxes = det_label
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                # rows are [x1, y1, x2, y2, score]
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = cat_ids[label]
                bbox_json_results.append(data)
            if len(seg) == 2:
                # (segms, mask_scores) variant: dedicated mask scores
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                # plain per-class segms: fall back to the box scores
                segms = seg[label]
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['score'] = float(mask_score[i])
                data['category_id'] = cat_ids[label]
                # RLE counts arrive as bytes; json needs str
                segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                segm_json_results.append(data)
    return bbox_json_results, segm_json_results
def results2json(dataset, results, out_file):
    """Dump detection results to COCO-style json file(s).

    The flavour of ``results`` decides what is written: a list of
    per-class arrays -> bbox (also registered as 'proposal'), a tuple
    -> bbox + segm, a bare ndarray -> proposal.

    Returns:
        dict: result type -> path of the json file written.
    """
    result_files = dict()
    sample = results[0]
    if isinstance(sample, list):
        json_results = det2json(dataset, results)
        bbox_path = '{}.{}.json'.format(out_file, 'bbox')
        result_files['bbox'] = bbox_path
        result_files['proposal'] = bbox_path
        mmcv.dump(json_results, bbox_path)
    elif isinstance(sample, tuple):
        json_results = segm2json(dataset, results)
        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
        result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
        mmcv.dump(json_results[0], result_files['bbox'])
        mmcv.dump(json_results[1], result_files['segm'])
    elif isinstance(sample, np.ndarray):
        json_results = proposal2json(dataset, results)
        result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
        mmcv.dump(json_results, result_files['proposal'])
    else:
        raise TypeError('invalid type of results')
    return result_files
def get_seg_masks(mask_pred, det_bboxes, det_labels, img_meta, rescale, num_classes):
    """Get segmentation masks from mask_pred and bboxes.

    Paints each predicted mask probability map into a full-image binary
    mask at its detection box, thresholds it with
    config.mask_thr_binary and RLE-encodes the result.

    Returns:
        list[list]: one list of RLE dicts per foreground class.

    NOTE(review): img_meta is treated as a flat array laid out as
    [ori_h, ori_w, scale_h, scale_w] -- confirm against the caller.
    """
    mask_pred = mask_pred.astype(np.float32)
    cls_segms = [[] for _ in range(num_classes - 1)]
    bboxes = det_bboxes[:, :4]
    labels = det_labels + 1  # shift labels so 0 can remain background
    ori_shape = img_meta[:2].astype(np.int32)
    scale_factor = img_meta[2:].astype(np.int32)
    if rescale:
        img_h, img_w = ori_shape[:2]
    else:
        img_h = np.round(ori_shape[0] * scale_factor[0]).astype(np.int32)
        img_w = np.round(ori_shape[1] * scale_factor[1]).astype(np.int32)
    for i in range(bboxes.shape[0]):
        bbox = (bboxes[i, :] / 1.0).astype(np.int32)
        label = labels[i]
        # inclusive box extents, clipped to the image and kept >= 1 px
        w = max(bbox[2] - bbox[0] + 1, 1)
        h = max(bbox[3] - bbox[1] + 1, 1)
        w = min(w, img_w - bbox[0])
        h = min(h, img_h - bbox[1])
        if w <= 0 or h <= 0:
            print("there is invalid proposal bbox, index={} bbox={} w={} h={}".format(i, bbox, w, h))
            w = max(w, 1)
            h = max(h, 1)
        mask_pred_ = mask_pred[i, :, :]
        im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
        # resize the fixed-size predicted mask to the box, then binarize
        bbox_mask = mmcv.imresize(mask_pred_, (w, h))
        bbox_mask = (bbox_mask > config.mask_thr_binary).astype(np.uint8)
        im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask
        # pycocotools expects a Fortran-ordered (h, w, 1) uint8 array
        rle = maskUtils.encode(
            np.array(im_mask[:, :, np.newaxis], order='F'))[0]
        cls_segms[label - 1].append(rle)
    return cls_segms
| 36.490842 | 101 | 0.590042 |
ace3e4bb9c793fc02544ad626030153c45806677 | 2,122 | py | Python | build/split.py | dimidd/pycon-pandas-tutorial | 4503dcc6f2515c9cf23594eef0e99f8bb38554f1 | [
"MIT"
] | 2 | 2019-07-09T07:28:59.000Z | 2020-05-14T13:03:07.000Z | build/split.py | dimidd/pycon-pandas-tutorial | 4503dcc6f2515c9cf23594eef0e99f8bb38554f1 | [
"MIT"
] | 1 | 2020-05-03T07:24:21.000Z | 2020-05-03T07:24:40.000Z | build/split.py | dimidd/pycon-pandas-tutorial | 4503dcc6f2515c9cf23594eef0e99f8bb38554f1 | [
"MIT"
] | 3 | 2018-12-11T11:57:52.000Z | 2020-05-14T13:03:14.000Z | import glob
import json
def code_cell(source=None):
    """Return a fresh Jupyter code-cell dict.

    ``source`` is a list of source lines; when omitted the cell is
    created empty.
    """
    cell = {
        "cell_type": "code",
        "execution_count": None,
        "metadata": {},
        "outputs": [],
        "source": source if source is not None else [],
    }
    return cell
def question_cell(text):
    """Return a markdown cell that renders *text* as an H3 heading."""
    heading = '### ' + text.strip()
    return {
        "cell_type": "markdown",
        "metadata": {},
        "source": heading,
    }
# def main():
# session_cells = {n: [] for n in range(1, 6 + 1)}
# f = open(os.path.dirname(os.path.abspath(__file__)) + '/../All.ipynb')
# j = json.load(f)
# cells = j['cells']
# for cell in cells:
# source = ''.join(cell['source'])
# m = re.search(r'# +(\d+)\. ', source.strip())
# if not m:
# continue
# n = int(m.group(1))
# session_cells[n].append(cell)
# for n, cells in sorted(session_cells.items()):
# print('Session {}: {} cells'.format(n, len(cells)))
def convert(filename):
    """Convert a Solutions notebook into an Exercises notebook.

    Loads ``filename`` (which must contain 'Solutions' in its name),
    replaces its cells with the stripped-down exercise cells produced
    by filter_cells(), and writes the result next to it with
    'Solutions' renamed to 'Exercises'.
    """
    # use a context manager so the handle is closed promptly
    # (the original leaked the open file object)
    with open(filename) as f:
        notebook = json.load(f)
    notebook['cells'] = list(filter_cells(filename, notebook['cells']))
    assert 'Solutions' in filename
    with open(filename.replace('Solutions', 'Exercises'), 'w') as f:
        f.write(json.dumps(notebook, indent=2))
def filter_cells(filename, cells):
    """Yield exercise cells built from a Solutions notebook's cells.

    Code cells before the first '# '-prefixed cell are passed through
    verbatim.  Every '# '-prefixed code cell becomes a markdown
    question (its leading comment lines joined into one sentence)
    followed by two empty code cells.  Prints the number of questions
    found at the end.
    """
    question_count = 0
    preamble = True
    for cell in cells:
        if cell['cell_type'] != 'code':
            continue
        joined = ''.join(cell['source'])
        is_question = joined.startswith('# ')
        if preamble:
            if not is_question:
                # setup cells before the first question survive as-is
                yield code_cell(cell['source'])
            else:
                preamble = False
        if not is_question:
            continue
        fragments = []
        for line in cell['source']:
            if not line.startswith('# '):
                break
            fragments.append(line[2:].strip())
        yield question_cell(' '.join(fragments))
        yield code_cell()
        yield code_cell()
        question_count += 1
    print('{:6} {}'.format(question_count, filename))
def main2():
    """Convert every ../Solutions-*.ipynb into its Exercises twin."""
    for filename in sorted(glob.glob('../Solutions-*.ipynb')):
        convert(filename)
if __name__ == '__main__':
    main2()
| 23.577778 | 76 | 0.517908 |
ace3e4c5e18dd43e37a8e433e4a4dfd50cf601ca | 181,760 | py | Python | pandas/io/tests/test_pytables.py | kjordahl/pandas | e660c058a662426afc4d8855aabf4677f01b4a4c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2016-07-14T20:35:55.000Z | 2016-07-14T20:35:55.000Z | pandas/io/tests/test_pytables.py | jonathanrocher/pandas | e660c058a662426afc4d8855aabf4677f01b4a4c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/io/tests/test_pytables.py | jonathanrocher/pandas | e660c058a662426afc4d8855aabf4677f01b4a4c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex, TimedeltaIndex, isnull)
from pandas.io.pytables import _tables, TableIterator
try:
_tables()
except ImportError as e:
raise nose.SkipTest(e)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
from pandas import compat
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
from numpy.testing.decorators import slow
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
# default compression library: blosc needs PyTables >= 2.2, otherwise
# fall back to zlib
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
    and 'blosc' or 'zlib'
# HDF5 file access is not safe across parallel nose worker processes
_multiprocess_can_split_ = False
# contextmanager to ensure the file cleanup
def safe_remove(path):
    """Best-effort removal of *path*.

    Accepts ``None`` as a no-op so callers can pass an optional path;
    a missing or un-removable file is silently ignored.
    """
    if path is not None:
        try:
            os.remove(path)
        except OSError:
            # narrowed from a bare except: the file may simply never
            # have been created, which is fine here
            pass
def safe_close(store):
    """Close an HDFStore, ignoring a ``None`` store and close() errors."""
    try:
        if store is not None:
            store.close()
    except Exception:
        # narrowed from a bare except: still best-effort, but no
        # longer swallows KeyboardInterrupt/SystemExit
        pass
def create_tempfile(path):
    """Return *path* relocated into the system temp directory.

    The file itself is neither created nor opened.
    """
    return os.path.join(tempfile.gettempdir(), path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
                       fletcher32=False):
    """Yield an HDFStore on a temporary file, cleaning up afterwards.

    A bare filename is relocated into the system temp directory.  The
    store is always closed on exit; for write/append modes the file is
    removed as well.
    """
    store = None  # so the finally block is safe if HDFStore() raises
    try:
        # put in the temporary path if we don't have one already
        if not len(os.path.dirname(path)):
            path = create_tempfile(path)
        # BUG FIX: fletcher32 was previously hard-coded to False,
        # silently ignoring the caller's argument
        store = HDFStore(path, mode=mode, complevel=complevel,
                         complib=complib, fletcher32=fletcher32)
        yield store
    finally:
        safe_close(store)
        if mode == 'w' or mode == 'a':
            safe_remove(path)
@contextmanager
def ensure_clean_path(path):
    """
    return essentially a named temporary file that is not opened
    and deleted on exiting; if path is a list, then create and
    return list of filenames
    """
    filenames = []  # so cleanup is safe even if setup fails part-way
    try:
        if isinstance(path, list):
            filenames = [create_tempfile(p) for p in path]
            yield filenames
        else:
            filenames = [create_tempfile(path)]
            yield filenames[0]
    finally:
        for f in filenames:
            safe_remove(f)
# set these parameters so we don't have file sharing:
# limit PyTables/numexpr/blosc to a single thread each
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w,f):
    """ don't produce a warning under PY3 """
    # Python 3 simply runs f(); Python 2 additionally asserts that the
    # expected warning class w is emitted while f() runs.
    if compat.PY3:
        f()
    else:
        with tm.assert_produces_warning(expected_warning=w):
            f()
class TestHDFStore(tm.TestCase):
    @classmethod
    def setUpClass(cls):
        """Silence PyTables 3.x deprecation noise for the whole class."""
        super(TestHDFStore, cls).setUpClass()
        # Pytables 3.0.0 deprecates lots of things
        tm.reset_testing_mode()
    @classmethod
    def tearDownClass(cls):
        """Restore the normal testing warning mode after the class runs."""
        super(TestHDFStore, cls).tearDownClass()
        # Pytables 3.0.0 deprecates lots of things
        tm.set_testing_mode()
    def setUp(self):
        """Pick a random per-test HDF5 scratch filename."""
        warnings.filterwarnings(action='ignore', category=FutureWarning)
        self.path = 'tmp.__%s__.h5' % tm.rands(10)
    def tearDown(self):
        """Nothing to clean up; the helper context managers remove files."""
        pass
def test_factory_fun(self):
path = create_tempfile(self.path)
try:
with get_store(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with get_store(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
    def test_context(self):
        """HDFStore as a context manager: error propagation and reuse."""
        path = create_tempfile(self.path)
        try:
            with HDFStore(path) as tbl:
                raise ValueError('blah')
        except ValueError:
            pass
        finally:
            safe_remove(path)
        try:
            with HDFStore(path) as tbl:
                tbl['a'] = tm.makeDataFrame()
            with HDFStore(path) as tbl:
                self.assertEqual(len(tbl), 1)
                self.assertEqual(type(tbl['a']), DataFrame)
        finally:
            safe_remove(path)
    def test_conv_read_write(self):
        """Round-trip various pandas objects through to_hdf()/read_hdf()."""
        path = create_tempfile(self.path)
        try:
            def roundtrip(key, obj,**kwargs):
                obj.to_hdf(path, key,**kwargs)
                return read_hdf(path, key)
            o = tm.makeTimeSeries()
            assert_series_equal(o, roundtrip('series',o))
            o = tm.makeStringSeries()
            assert_series_equal(o, roundtrip('string_series',o))
            o = tm.makeDataFrame()
            assert_frame_equal(o, roundtrip('frame',o))
            o = tm.makePanel()
            assert_panel_equal(o, roundtrip('panel',o))
            # table format also supports where-clause selection
            df = DataFrame(dict(A=lrange(5), B=lrange(5)))
            df.to_hdf(path,'table',append=True)
            result = read_hdf(path, 'table', where = ['index>2'])
            assert_frame_equal(df[df.index>2],result)
        finally:
            safe_remove(path)
    def test_long_strings(self):
        """100-char strings survive an append/select round-trip (GH6166)."""
        # GH6166
        # unconversion of long strings was being chopped in earlier
        # versions of numpy < 1.7.2
        df = DataFrame({'a': tm.rands_array(100, size=10)},
                       index=tm.rands_array(100, size=10))
        with ensure_clean_store(self.path) as store:
            store.append('df', df, data_columns=['a'])
            result = store.select('df')
            assert_frame_equal(df, result)
    def test_api(self):
        """Valid and invalid combinations of the append/format keywords
        for to_hdf, put and append (GH4584)."""
        # GH4584
        # API issue when to_hdf doesn't accept append AND format args
        with ensure_clean_path(self.path) as path:
            df = tm.makeDataFrame()
            df.iloc[:10].to_hdf(path,'df',append=True,format='table')
            df.iloc[10:].to_hdf(path,'df',append=True,format='table')
            assert_frame_equal(read_hdf(path,'df'),df)
            # append to False
            df.iloc[:10].to_hdf(path,'df',append=False,format='table')
            df.iloc[10:].to_hdf(path,'df',append=True,format='table')
            assert_frame_equal(read_hdf(path,'df'),df)
        with ensure_clean_path(self.path) as path:
            df = tm.makeDataFrame()
            df.iloc[:10].to_hdf(path,'df',append=True)
            df.iloc[10:].to_hdf(path,'df',append=True,format='table')
            assert_frame_equal(read_hdf(path,'df'),df)
            # append to False
            df.iloc[:10].to_hdf(path,'df',append=False,format='table')
            df.iloc[10:].to_hdf(path,'df',append=True)
            assert_frame_equal(read_hdf(path,'df'),df)
        with ensure_clean_path(self.path) as path:
            # fixed-format spellings all behave the same
            df = tm.makeDataFrame()
            df.to_hdf(path,'df',append=False,format='fixed')
            assert_frame_equal(read_hdf(path,'df'),df)
            df.to_hdf(path,'df',append=False,format='f')
            assert_frame_equal(read_hdf(path,'df'),df)
            df.to_hdf(path,'df',append=False)
            assert_frame_equal(read_hdf(path,'df'),df)
            df.to_hdf(path,'df')
            assert_frame_equal(read_hdf(path,'df'),df)
        with ensure_clean_store(self.path) as store:
            path = store._path
            df = tm.makeDataFrame()
            _maybe_remove(store,'df')
            store.append('df',df.iloc[:10],append=True,format='table')
            store.append('df',df.iloc[10:],append=True,format='table')
            assert_frame_equal(store.select('df'),df)
            # append to False
            _maybe_remove(store,'df')
            store.append('df',df.iloc[:10],append=False,format='table')
            store.append('df',df.iloc[10:],append=True,format='table')
            assert_frame_equal(store.select('df'),df)
            # formats
            _maybe_remove(store,'df')
            store.append('df',df.iloc[:10],append=False,format='table')
            store.append('df',df.iloc[10:],append=True,format='table')
            assert_frame_equal(store.select('df'),df)
            _maybe_remove(store,'df')
            store.append('df',df.iloc[:10],append=False,format='table')
            store.append('df',df.iloc[10:],append=True,format=None)
            assert_frame_equal(store.select('df'),df)
        with ensure_clean_path(self.path) as path:
            # invalid
            df = tm.makeDataFrame()
            self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='f')
            self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='fixed')
            self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo')
            self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar')
            # File path doesn't exist
            path = ""
            self.assertRaises(IOError, read_hdf, path, 'df')
    def test_api_default_format(self):
        """The io.hdf.default_format option drives put/append behaviour."""
        # default_format option
        with ensure_clean_store(self.path) as store:
            df = tm.makeDataFrame()
            pandas.set_option('io.hdf.default_format','fixed')
            _maybe_remove(store,'df')
            store.put('df',df)
            self.assertFalse(store.get_storer('df').is_table)
            # append requires table format, so it must fail under 'fixed'
            self.assertRaises(ValueError, store.append, 'df2',df)
            pandas.set_option('io.hdf.default_format','table')
            _maybe_remove(store,'df')
            store.put('df',df)
            self.assertTrue(store.get_storer('df').is_table)
            _maybe_remove(store,'df2')
            store.append('df2',df)
            self.assertTrue(store.get_storer('df').is_table)
            pandas.set_option('io.hdf.default_format',None)
        with ensure_clean_path(self.path) as path:
            # same option applies to the to_hdf convenience path
            df = tm.makeDataFrame()
            pandas.set_option('io.hdf.default_format','fixed')
            df.to_hdf(path,'df')
            with get_store(path) as store:
                self.assertFalse(store.get_storer('df').is_table)
            self.assertRaises(ValueError, df.to_hdf, path,'df2', append=True)
            pandas.set_option('io.hdf.default_format','table')
            df.to_hdf(path,'df3')
            with HDFStore(path) as store:
                self.assertTrue(store.get_storer('df3').is_table)
            df.to_hdf(path,'df4',append=True)
            with HDFStore(path) as store:
                self.assertTrue(store.get_storer('df4').is_table)
            pandas.set_option('io.hdf.default_format',None)
    def test_keys(self):
        """keys() lists every stored object with an absolute path name."""
        with ensure_clean_store(self.path) as store:
            store['a'] = tm.makeTimeSeries()
            store['b'] = tm.makeStringSeries()
            store['c'] = tm.makeDataFrame()
            store['d'] = tm.makePanel()
            store['foo/bar'] = tm.makePanel()
            self.assertEqual(len(store), 5)
            self.assertTrue(set(
                store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
    def test_repr(self):
        """repr()/str() work on stores with mixed content and on storers."""
        with ensure_clean_store(self.path) as store:
            repr(store)
            store['a'] = tm.makeTimeSeries()
            store['b'] = tm.makeStringSeries()
            store['c'] = tm.makeDataFrame()
            store['d'] = tm.makePanel()
            store['foo/bar'] = tm.makePanel()
            store.append('e', tm.makePanel())
            # a frame covering many dtypes in one store
            df = tm.makeDataFrame()
            df['obj1'] = 'foo'
            df['obj2'] = 'bar'
            df['bool1'] = df['A'] > 0
            df['bool2'] = df['B'] > 0
            df['bool3'] = True
            df['int1'] = 1
            df['int2'] = 2
            df['timestamp1'] = Timestamp('20010102')
            df['timestamp2'] = Timestamp('20010103')
            df['datetime1'] = datetime.datetime(2001,1,2,0,0)
            df['datetime2'] = datetime.datetime(2001,1,3,0,0)
            df.ix[3:6,['obj1']] = np.nan
            df = df.consolidate().convert_objects()
            # storing object columns in fixed format warns; silence it
            warnings.filterwarnings('ignore', category=PerformanceWarning)
            store['df'] = df
            warnings.filterwarnings('always', category=PerformanceWarning)
            # make a random group in hdf space
            store._handle.create_group(store._handle.root,'bah')
            repr(store)
            str(store)
        # storers
        with ensure_clean_store(self.path) as store:
            df = tm.makeDataFrame()
            store.append('df',df)
            s = store.get_storer('df')
            repr(s)
            str(s)
    def test_contains(self):
        """__contains__ accepts keys with or without a leading slash."""
        with ensure_clean_store(self.path) as store:
            store['a'] = tm.makeTimeSeries()
            store['b'] = tm.makeDataFrame()
            store['foo/bar'] = tm.makeDataFrame()
            self.assertIn('a', store)
            self.assertIn('b', store)
            self.assertNotIn('c', store)
            self.assertIn('foo/bar', store)
            self.assertIn('/foo/bar', store)
            self.assertNotIn('/foo/b', store)
            self.assertNotIn('bar', store)
            # keys that are not natural HDF5 names still work (GH 2694)
            warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
            store['node())'] = tm.makeDataFrame()
            self.assertIn('node())', store)
    def test_versioning(self):
        """Stored nodes carry a pandas_version attribute; wiping it on an
        appendable table makes select() fail."""
        with ensure_clean_store(self.path) as store:
            store['a'] = tm.makeTimeSeries()
            store['b'] = tm.makeDataFrame()
            df = tm.makeTimeDataFrame()
            _maybe_remove(store, 'df1')
            store.append('df1', df[:10])
            store.append('df1', df[10:])
            self.assertEqual(store.root.a._v_attrs.pandas_version, '0.15.2')
            self.assertEqual(store.root.b._v_attrs.pandas_version, '0.15.2')
            self.assertEqual(store.root.df1._v_attrs.pandas_version, '0.15.2')
            # write a file and wipe its versioning
            _maybe_remove(store, 'df2')
            store.append('df2', df)
            # this is an error because its table_type is appendable, but no version
            # info
            store.get_node('df2')._v_attrs.pandas_version = None
            self.assertRaises(Exception, store.select, 'df2')
    def test_mode(self):
        """Open-mode semantics ('r', 'r+', 'a', 'w') for the constructor,
        the context manager and the to_hdf/read_hdf conveniences."""
        df = tm.makeTimeDataFrame()
        def check(mode):
            with ensure_clean_path(self.path) as path:
                # constructor: read modes fail on a missing file
                if mode in ['r','r+']:
                    self.assertRaises(IOError, HDFStore, path, mode=mode)
                else:
                    store = HDFStore(path,mode=mode)
                    self.assertEqual(store._handle.mode, mode)
                    store.close()
            with ensure_clean_path(self.path) as path:
                # context
                if mode in ['r','r+']:
                    def f():
                        with HDFStore(path,mode=mode) as store:
                            pass
                    self.assertRaises(IOError, f)
                else:
                    with HDFStore(path,mode=mode) as store:
                        self.assertEqual(store._handle.mode, mode)
            with ensure_clean_path(self.path) as path:
                # conv write
                if mode in ['r','r+']:
                    self.assertRaises(IOError, df.to_hdf, path, 'df', mode=mode)
                    df.to_hdf(path,'df',mode='w')
                else:
                    df.to_hdf(path,'df',mode=mode)
                # conv read: 'w' truncates, so the key is gone
                if mode in ['w']:
                    self.assertRaises(KeyError, read_hdf, path, 'df', mode=mode)
                else:
                    result = read_hdf(path,'df',mode=mode)
                    assert_frame_equal(result,df)
        check('r')
        check('r+')
        check('a')
        check('w')
    def test_reopen_handle(self):
        """Reopening a live handle: forbidden mode changes, truncation,
        and the is_open/_mode bookkeeping."""
        with ensure_clean_path(self.path) as path:
            store = HDFStore(path,mode='a')
            store['a'] = tm.makeTimeSeries()
            # invalid mode change: reopening 'w' on live data would
            # silently truncate it
            self.assertRaises(PossibleDataLossError, store.open, 'w')
            store.close()
            self.assertFalse(store.is_open)
            # truncation ok here
            store.open('w')
            self.assertTrue(store.is_open)
            self.assertEqual(len(store), 0)
            store.close()
            self.assertFalse(store.is_open)
            store = HDFStore(path,mode='a')
            store['a'] = tm.makeTimeSeries()
            # reopen as read
            store.open('r')
            self.assertTrue(store.is_open)
            self.assertEqual(len(store), 1)
            self.assertEqual(store._mode, 'r')
            store.close()
            self.assertFalse(store.is_open)
            # reopen as append
            store.open('a')
            self.assertTrue(store.is_open)
            self.assertEqual(len(store), 1)
            self.assertEqual(store._mode, 'a')
            store.close()
            self.assertFalse(store.is_open)
            # reopen as append (again)
            store.open('a')
            self.assertTrue(store.is_open)
            self.assertEqual(len(store), 1)
            self.assertEqual(store._mode, 'a')
            store.close()
            self.assertFalse(store.is_open)
    def test_open_args(self):
        """Driver kwargs pass through: an in-core store never hits disk."""
        with ensure_clean_path(self.path) as path:
            df = tm.makeDataFrame()
            # create an in memory store
            store = HDFStore(path,mode='a',driver='H5FD_CORE',driver_core_backing_store=0)
            store['df'] = df
            store.append('df2',df)
            tm.assert_frame_equal(store['df'],df)
            tm.assert_frame_equal(store['df2'],df)
            store.close()
            # the file should not have actually been written
            self.assertFalse(os.path.exists(path))
    def test_flush(self):
        """flush() and flush(fsync=True) run without error."""
        with ensure_clean_store(self.path) as store:
            store['a'] = tm.makeTimeSeries()
            store.flush()
            store.flush(fsync=True)
    def test_get(self):
        """get() matches item access, with and without a leading slash,
        and raises KeyError for missing keys."""
        with ensure_clean_store(self.path) as store:
            store['a'] = tm.makeTimeSeries()
            left = store.get('a')
            right = store['a']
            tm.assert_series_equal(left, right)
            left = store.get('/a')
            right = store['/a']
            tm.assert_series_equal(left, right)
            self.assertRaises(KeyError, store.get, 'b')
    def test_getattr(self):
        """Stored keys are reachable as attributes; non-underscored
        internals are not exposed that way."""
        with ensure_clean_store(self.path) as store:
            s = tm.makeTimeSeries()
            store['a'] = s
            # test attribute access
            result = store.a
            tm.assert_series_equal(result, s)
            result = getattr(store,'a')
            tm.assert_series_equal(result, s)
            df = tm.makeTimeDataFrame()
            store['df'] = df
            result = store.df
            tm.assert_frame_equal(result, df)
            # errors
            self.assertRaises(AttributeError, getattr, store, 'd')
            for x in ['mode','path','handle','complib']:
                self.assertRaises(AttributeError, getattr, store, x)
            # not stores: the underscored internals do exist
            for x in ['mode','path','handle','complib']:
                getattr(store,"_%s" % x)
    def test_put(self):
        """put() storage rules: fixed vs table format and when append
        is allowed."""
        with ensure_clean_store(self.path) as store:
            ts = tm.makeTimeSeries()
            df = tm.makeTimeDataFrame()
            store['a'] = ts
            store['b'] = df[:10]
            store['foo/bar/bah'] = df[:10]
            store['foo'] = df[:10]
            store['/foo'] = df[:10]
            store.put('c', df[:10], format='table')
            # not OK, not a table
            self.assertRaises(
                ValueError, store.put, 'b', df[10:], append=True)
            # node does not currently exist, test _is_table_type returns False in
            # this case
            # _maybe_remove(store, 'f')
            # self.assertRaises(ValueError, store.put, 'f', df[10:], append=True)
            # can't put to a table (use append instead)
            self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)
            # overwrite table
            store.put('c', df[:10], format='table', append=False)
            tm.assert_frame_equal(df[:10], store['c'])
    def test_put_string_index(self):
        """Long and mixed-length string indexes round-trip via put."""
        with ensure_clean_store(self.path) as store:
            index = Index(
                ["I am a very long string index: %s" % i for i in range(20)])
            s = Series(np.arange(20), index=index)
            df = DataFrame({'A': s, 'B': s})
            store['a'] = s
            tm.assert_series_equal(store['a'], s)
            store['b'] = df
            tm.assert_frame_equal(store['b'], df)
            # mixed length
            index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + ["I am a very long string index: %s" % i for i in range(20)])
            s = Series(np.arange(21), index=index)
            df = DataFrame({'A': s, 'B': s})
            store['a'] = s
            tm.assert_series_equal(store['a'], s)
            store['b'] = df
            tm.assert_frame_equal(store['b'], df)
    def test_put_compression(self):
        """zlib compression works for table format, rejected for fixed."""
        with ensure_clean_store(self.path) as store:
            df = tm.makeTimeDataFrame()
            store.put('c', df, format='table', complib='zlib')
            tm.assert_frame_equal(store['c'], df)
            # can't compress if format='fixed'
            self.assertRaises(ValueError, store.put, 'b', df,
                              format='fixed', complib='zlib')
    def test_put_compression_blosc(self):
        """blosc compression works for table format, rejected for fixed
        (requires PyTables >= 2.2)."""
        tm.skip_if_no_package('tables', '2.2', app='blosc support')
        df = tm.makeTimeDataFrame()
        with ensure_clean_store(self.path) as store:
            # can't compress if format='fixed'
            self.assertRaises(ValueError, store.put, 'b', df,
                              format='fixed', complib='blosc')
            store.put('c', df, format='table', complib='blosc')
            tm.assert_frame_equal(store['c'], df)
    def test_put_integer(self):
        """A frame with a plain integer index round-trips."""
        # non-date, non-string index
        df = DataFrame(np.random.randn(50, 100))
        self._check_roundtrip(df, tm.assert_frame_equal)
    def test_put_mixed_type(self):
        """put() of a mixed-dtype frame warns (PerformanceWarning) but
        still round-trips exactly."""
        df = tm.makeTimeDataFrame()
        df['obj1'] = 'foo'
        df['obj2'] = 'bar'
        df['bool1'] = df['A'] > 0
        df['bool2'] = df['B'] > 0
        df['bool3'] = True
        df['int1'] = 1
        df['int2'] = 2
        df['timestamp1'] = Timestamp('20010102')
        df['timestamp2'] = Timestamp('20010103')
        df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
        df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
        df.ix[3:6, ['obj1']] = np.nan
        df = df.consolidate().convert_objects()
        with ensure_clean_store(self.path) as store:
            _maybe_remove(store, 'df')
            # cannot use assert_produces_warning here for some reason
            # a PendingDeprecationWarning is also raised?
            warnings.filterwarnings('ignore', category=PerformanceWarning)
            store.put('df',df)
            warnings.filterwarnings('always', category=PerformanceWarning)
            expected = store.get('df')
            tm.assert_frame_equal(expected,df)
    def test_append(self):
        """append() across frames, panels, 4-D panels, odd key names,
        mixed object columns and unsigned-int dtypes."""
        with ensure_clean_store(self.path) as store:
            df = tm.makeTimeDataFrame()
            _maybe_remove(store, 'df1')
            store.append('df1', df[:10])
            store.append('df1', df[10:])
            tm.assert_frame_equal(store['df1'], df)
            _maybe_remove(store, 'df2')
            store.put('df2', df[:10], format='table')
            store.append('df2', df[10:])
            tm.assert_frame_equal(store['df2'], df)
            _maybe_remove(store, 'df3')
            store.append('/df3', df[:10])
            store.append('/df3', df[10:])
            tm.assert_frame_equal(store['df3'], df)
            # this is allowed but almost always you don't want to do it
            with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning):
                _maybe_remove(store, '/df3 foo')
                store.append('/df3 foo', df[:10])
                store.append('/df3 foo', df[10:])
                tm.assert_frame_equal(store['df3 foo'], df)
            # panel
            wp = tm.makePanel()
            _maybe_remove(store, 'wp1')
            store.append('wp1', wp.ix[:, :10, :])
            store.append('wp1', wp.ix[:, 10:, :])
            assert_panel_equal(store['wp1'], wp)
            # ndim
            p4d = tm.makePanel4D()
            _maybe_remove(store, 'p4d')
            store.append('p4d', p4d.ix[:, :, :10, :])
            store.append('p4d', p4d.ix[:, :, 10:, :])
            assert_panel4d_equal(store['p4d'], p4d)
            # test using axis labels
            _maybe_remove(store, 'p4d')
            store.append('p4d', p4d.ix[:, :, :10, :], axes=[
                'items', 'major_axis', 'minor_axis'])
            store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
                'items', 'major_axis', 'minor_axis'])
            assert_panel4d_equal(store['p4d'], p4d)
            # test using different number of items on each axis
            p4d2 = p4d.copy()
            p4d2['l4'] = p4d['l1']
            p4d2['l5'] = p4d['l1']
            _maybe_remove(store, 'p4d2')
            store.append(
                'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
            assert_panel4d_equal(store['p4d2'], p4d2)
            # test using different order of items on the non-index axes
            _maybe_remove(store, 'wp1')
            wp_append1 = wp.ix[:, :10, :]
            store.append('wp1', wp_append1)
            wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])
            store.append('wp1', wp_append2)
            assert_panel_equal(store['wp1'], wp)
            # dtype issues - mixed type in a single object column
            df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
            df['mixed_column'] = 'testing'
            df.ix[2, 'mixed_column'] = np.nan
            _maybe_remove(store, 'df')
            store.append('df', df)
            tm.assert_frame_equal(store['df'], df)
            # uints - test storage of uints
            uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8),
                                   'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16),
                                   'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32),
                                   'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)},
                                  index=np.arange(5))
            _maybe_remove(store, 'uints')
            store.append('uints', uint_data)
            tm.assert_frame_equal(store['uints'], uint_data)
            # uints - test storage of uints in indexable columns
            _maybe_remove(store, 'uints')
            store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported
            tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
    """Append Series of several flavors to a table store and verify
    round-trip equality, name preservation, value/index selection via
    Term, and MultiIndex series handling."""
    with ensure_clean_store(self.path) as store:

        # basic: string, time, and integer-range series
        ss = tm.makeStringSeries()
        ts = tm.makeTimeSeries()
        ns = Series(np.arange(100))

        store.append('ss', ss)
        result = store['ss']
        tm.assert_series_equal(result, ss)
        # an unnamed series must come back with name=None
        self.assertIsNone(result.name)

        store.append('ts', ts)
        result = store['ts']
        tm.assert_series_equal(result, ts)
        self.assertIsNone(result.name)

        # a named series must preserve its name through the round-trip
        ns.name = 'foo'
        store.append('ns', ns)
        result = store['ns']
        tm.assert_series_equal(result, ns)
        self.assertEqual(result.name, ns.name)

        # select on the values (series values are exposed as column 'foo')
        expected = ns[ns>60]
        result = store.select('ns',Term('foo>60'))
        tm.assert_series_equal(result,expected)

        # select on the index and values combined
        expected = ns[(ns>70) & (ns.index<90)]
        result = store.select('ns',[Term('foo>70'), Term('index<90')])
        tm.assert_series_equal(result,expected)

        # multi-index series built by stacking a frame
        mi = DataFrame(np.random.randn(5,1),columns=['A'])
        mi['B'] = np.arange(len(mi))
        mi['C'] = 'foo'
        mi.loc[3:5,'C'] = 'bar'
        mi.set_index(['C','B'],inplace=True)
        s = mi.stack()
        s.index = s.index.droplevel(2)
        store.append('mi', s)
        tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
    """GH5386: frames carrying a variety of index types must round-trip
    through both 'table' and 'fixed' formats."""
    with ensure_clean_store(self.path) as store:

        def roundtrip(fmt, index_maker):
            # build a frame with the requested index type, store it in
            # the given format, and compare the retrieved copy
            frame = DataFrame(np.random.randn(10, 2), columns=list('AB'))
            frame.index = index_maker(len(frame))
            _maybe_remove(store, 'df')
            store.put('df', frame, format=fmt)
            assert_frame_equal(frame, store['df'])

        makers = [tm.makeFloatIndex, tm.makeStringIndex,
                  tm.makeIntIndex, tm.makeDateIndex]
        for maker in makers:
            roundtrip('table', maker)
            roundtrip('fixed', maker)

        # period index currently broken for table format
        # see GH7796 FIXME
        roundtrip('fixed', tm.makePeriodIndex)
        # roundtrip('table', tm.makePeriodIndex)

        # unicode indexes
        unicode_maker = tm.makeUnicodeIndex
        if compat.PY3:
            roundtrip('table', unicode_maker)
            roundtrip('fixed', unicode_maker)
        else:
            # py2: only the fixed format supports unicode indexes,
            # and it emits a performance warning while doing so
            self.assertRaises(TypeError, roundtrip, 'table', unicode_maker)
            with tm.assert_produces_warning(expected_warning=PerformanceWarning):
                roundtrip('fixed', unicode_maker)
def test_encoding(self):
    """Round-trip a frame through an ascii-encoded table and select from
    it with an ascii-encoded Term."""
    # the encoded on-disk layout asserted here is little-endian only
    if sys.byteorder != 'little':
        raise nose.SkipTest('system byteorder is not little')

    with ensure_clean_store(self.path) as store:
        frame = DataFrame(dict(A='foo',B='bar'),index=range(5))
        frame.loc[2,'A'] = np.nan
        frame.loc[3,'B'] = np.nan
        _maybe_remove(store, 'df')
        store.append('df', frame, encoding='ascii')
        tm.assert_frame_equal(store['df'], frame)

        # column selection with an encoding-aware term
        expected = frame.reindex(columns=['A'])
        result = store.select('df', Term('columns=A', encoding='ascii'))
        tm.assert_frame_equal(result, expected)
def test_append_some_nans(self):
    """Frames with partially-nan columns (float, string, datetime) must
    survive a two-part append round-trip intact."""
    with ensure_clean_store(self.path) as store:
        df = DataFrame({'A' : Series(np.random.randn(20)).astype('int32'),
                        'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20),
                        'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
                       index=np.arange(20))
        # nan out a mixed set of columns over most rows
        _maybe_remove(store, 'df1')
        df.ix[0:15,['A1','B','D','E']] = np.nan
        store.append('df1', df[:10])
        store.append('df1', df[10:])
        tm.assert_frame_equal(store['df1'], df)

        # first float column entirely nan
        df1 = df.copy()
        df1.ix[:,'A1'] = np.nan
        _maybe_remove(store, 'df1')
        store.append('df1', df1[:10])
        store.append('df1', df1[10:])
        tm.assert_frame_equal(store['df1'], df1)

        # 2nd float column entirely nan
        df2 = df.copy()
        df2.ix[:,'A2'] = np.nan
        _maybe_remove(store, 'df2')
        store.append('df2', df2[:10])
        store.append('df2', df2[10:])
        tm.assert_frame_equal(store['df2'], df2)

        # datetime column entirely nan
        df3 = df.copy()
        df3.ix[:,'E'] = np.nan
        _maybe_remove(store, 'df3')
        store.append('df3', df3[:10])
        store.append('df3', df3[10:])
        tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
    """Rows that are entirely nan are dropped when dropna=True and kept
    when dropna=False; string/datetime columns force rows to be written
    regardless of dropna.  Also exercises the io.hdf.dropna_table option."""
    with ensure_clean_store(self.path) as store:

        df = DataFrame({'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20)},
                       index=np.arange(20))
        df.ix[0:15,:] = np.nan

        # nan some entire rows (dropna=True): only the last 4 rows survive
        _maybe_remove(store, 'df')
        store.append('df', df[:10], dropna=True)
        store.append('df', df[10:], dropna=True)
        tm.assert_frame_equal(store['df'], df[-4:])

        # nan some entire rows (dropna=False): everything is written
        _maybe_remove(store, 'df2')
        store.append('df2', df[:10], dropna=False)
        store.append('df2', df[10:], dropna=False)
        tm.assert_frame_equal(store['df2'], df)

        # tests the option io.hdf.dropna_table as the default behavior
        pandas.set_option('io.hdf.dropna_table',False)
        _maybe_remove(store, 'df3')
        store.append('df3', df[:10])
        store.append('df3', df[10:])
        tm.assert_frame_equal(store['df3'], df)

        pandas.set_option('io.hdf.dropna_table',True)
        _maybe_remove(store, 'df4')
        store.append('df4', df[:10])
        store.append('df4', df[10:])
        tm.assert_frame_equal(store['df4'], df[-4:])

        # nan some entire rows (strings are still written!)
        df = DataFrame({'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20),
                        'B' : 'foo', 'C' : 'bar'},
                       index=np.arange(20))

        df.ix[0:15,:] = np.nan

        _maybe_remove(store, 'df')
        store.append('df', df[:10], dropna=True)
        store.append('df', df[10:], dropna=True)
        tm.assert_frame_equal(store['df'], df)

        _maybe_remove(store, 'df2')
        store.append('df2', df[:10], dropna=False)
        store.append('df2', df[10:], dropna=False)
        tm.assert_frame_equal(store['df2'], df)

        # nan some entire rows (but since we have dates they are still written!)
        df = DataFrame({'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20),
                        'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
                       index=np.arange(20))

        df.ix[0:15,:] = np.nan

        _maybe_remove(store, 'df')
        store.append('df', df[:10], dropna=True)
        store.append('df', df[10:], dropna=True)
        tm.assert_frame_equal(store['df'], df)

        _maybe_remove(store, 'df2')
        store.append('df2', df[:10], dropna=False)
        store.append('df2', df[10:], dropna=False)
        tm.assert_frame_equal(store['df2'], df)
def test_append_frame_column_oriented(self):
    """Append a frame along the 'columns' axis and verify selection on
    both the indexable (columns) and non-indexable (index) axes."""
    with ensure_clean_store(self.path) as store:

        # column oriented: append two column-slices and reassemble
        df = tm.makeTimeDataFrame()
        _maybe_remove(store, 'df1')
        store.append('df1', df.ix[:, :2], axes=['columns'])
        store.append('df1', df.ix[:, 2:])
        tm.assert_frame_equal(store['df1'], df)

        result = store.select('df1', 'columns=A')
        expected = df.reindex(columns=['A'])
        tm.assert_frame_equal(expected, result)

        # selection on the non-indexable axis with an exact-match term
        result = store.select(
            'df1', ('columns=A', Term('index=df.index[0:4]')))
        expected = df.reindex(columns=['A'], index=df.index[0:4])
        tm.assert_frame_equal(expected, result)

        # this isn't supported: range queries on the non-indexable axis
        self.assertRaises(TypeError, store.select, 'df1', (
            'columns=A', Term('index>df.index[4]')))
def test_append_with_different_block_ordering(self):
    """GH 4096: appending the same logical frame with different internal
    block orderings must work; adding extra fields must raise."""
    with ensure_clean_store(self.path) as store:

        for i in range(10):

            df = DataFrame(np.random.randn(10,2),columns=list('AB'))
            df['index'] = range(10)
            df['index'] += i*10
            df['int64'] = Series([1]*len(df),dtype='int64')
            df['int16'] = Series([1]*len(df),dtype='int16')

            # shuffle the block layout on alternating iterations by
            # re-creating columns (moves them to the end of the frame)
            if i % 2 == 0:
                del df['int64']
                df['int64'] = Series([1]*len(df),dtype='int64')
            if i % 3 == 0:
                a = df.pop('A')
                df['A'] = a

            df.set_index('index',inplace=True)

            store.append('df',df)

    # test a different ordering but with more fields (an invalid combination)
    with ensure_clean_store(self.path) as store:

        df = DataFrame(np.random.randn(10,2),columns=list('AB'), dtype='float64')
        df['int64'] = Series([1]*len(df),dtype='int64')
        df['int16'] = Series([1]*len(df),dtype='int16')
        store.append('df',df)

        # store additional fields in different blocks -> schema mismatch
        df['int16_2'] = Series([1]*len(df),dtype='int16')
        self.assertRaises(ValueError, store.append, 'df', df)

        # store multiple additional fields in different blocks
        df['float_3'] = Series([1.]*len(df),dtype='float64')
        self.assertRaises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
    """Exercise 4-dim (Panel4D) tables with custom indexable axes:
    schema inheritance on append, axis-count validation, non-default
    indexer orderings, and partial selection."""
    with ensure_clean_store(self.path) as store:

        p4d = tm.makePanel4D()

        def check_indexers(key, indexers):
            # each indexer column must appear at its expected position
            # in the on-disk table description
            for i, idx in enumerate(indexers):
                self.assertTrue(getattr(getattr(
                    store.root, key).table.description, idx)._v_pos == i)

        # append then change (will take existing schema)
        indexers = ['items', 'major_axis', 'minor_axis']

        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :])
        assert_panel4d_equal(store.select('p4d'), p4d)
        check_indexers('p4d', indexers)

        # same as above, but try to append with different axes;
        # the stored schema wins
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
            'labels', 'items', 'major_axis'])
        assert_panel4d_equal(store.select('p4d'), p4d)
        check_indexers('p4d', indexers)

        # pass incorrect number of axes
        _maybe_remove(store, 'p4d')
        self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
            :, :, :10, :], axes=['major_axis', 'minor_axis'])

        # different than default indexables #1
        indexers = ['labels', 'major_axis', 'minor_axis']
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :])
        assert_panel4d_equal(store['p4d'], p4d)
        check_indexers('p4d', indexers)

        # different than default indexables #2
        indexers = ['major_axis', 'labels', 'minor_axis']
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :])
        assert_panel4d_equal(store['p4d'], p4d)
        check_indexers('p4d', indexers)

        # partial selection on a single label
        result = store.select('p4d', ['labels=l1'])
        expected = p4d.reindex(labels=['l1'])
        assert_panel4d_equal(result, expected)

        # partial selection across several axes
        result = store.select('p4d', [Term(
            'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
        expected = p4d.reindex(
            labels=['l1'], items=['ItemA'], minor_axis=['B'])
        assert_panel4d_equal(result, expected)

        # non-existent partial selection yields an empty axis
        result = store.select('p4d', [Term(
            'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
        expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])
        assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
    """String storage in table format: min_itemsize (scalar, dict, and
    per-data-column forms), truncation errors for oversized strings, and
    nan handling in string columns."""
    with ensure_clean_store(self.path) as store:
        wp = tm.makePanel()
        wp2 = wp.rename_axis(
            dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)

        def check_col(key,name,size):
            # verify the itemsize recorded for a column in the on-disk
            # table description
            self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)

        # scalar min_itemsize applies to the minor_axis labels
        store.append('s1', wp, min_itemsize=20)
        store.append('s1', wp2)
        expected = concat([wp, wp2], axis=2)
        expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
        assert_panel_equal(store['s1'], expected)
        check_col('s1', 'minor_axis', 20)

        # test dict format
        store.append('s2', wp, min_itemsize={'minor_axis': 20})
        store.append('s2', wp2)
        expected = concat([wp, wp2], axis=2)
        expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
        assert_panel_equal(store['s2'], expected)
        check_col('s2', 'minor_axis', 20)

        # apply the wrong field (similar to #1): appending longer
        # minor_axis labels than the sized field fails
        store.append('s3', wp, min_itemsize={'major_axis': 20})
        self.assertRaises(ValueError, store.append, 's3', wp2)

        # test truncation of bigger strings
        store.append('s4', wp)
        self.assertRaises(ValueError, store.append, 's4', wp2)

        # avoid truncation on elements
        df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
        store.append('df_big', df)
        tm.assert_frame_equal(store.select('df_big'), df)
        check_col('df_big', 'values_block_1', 15)

        # appending smaller strings is ok
        df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
        store.append('df_big', df2)
        expected = concat([df, df2])
        tm.assert_frame_equal(store.select('df_big'), expected)
        check_col('df_big', 'values_block_1', 15)

        # avoid truncation on elements with an explicit 'values' size
        df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
        store.append('df_big2', df, min_itemsize={'values': 50})
        tm.assert_frame_equal(store.select('df_big2'), df)
        check_col('df_big2', 'values_block_1', 50)

        # bigger string on next append raises
        store.append('df_new', df)
        df_new = DataFrame(
            [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
        self.assertRaises(ValueError, store.append, 'df_new', df_new)

        # with nans scattered through several string columns
        _maybe_remove(store, 'df')
        df = tm.makeTimeDataFrame()
        df['string'] = 'foo'
        df.ix[1:4, 'string'] = np.nan
        df['string2'] = 'bar'
        df.ix[4:8, 'string2'] = np.nan
        df['string3'] = 'bah'
        df.ix[1:, 'string3'] = np.nan
        store.append('df', df)
        result = store.select('df')
        tm.assert_frame_equal(result, df)

    with ensure_clean_store(self.path) as store:

        def check_col(key,name,size):
            self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)

        df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))

        # a min_itemsize that creates a data_column
        _maybe_remove(store, 'df')
        store.append('df', df, min_itemsize={'A' : 200 })
        check_col('df', 'A', 200)
        self.assertEqual(store.get_storer('df').data_columns, ['A'])

        # a min_itemsize that creates a data_column2
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
        check_col('df', 'A', 200)
        self.assertEqual(store.get_storer('df').data_columns, ['B','A'])

        # a min_itemsize keyed on 'values' sizes the value block,
        # plus the declared data column
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 })
        check_col('df', 'B', 200)
        check_col('df', 'values_block_0', 200)
        self.assertEqual(store.get_storer('df').data_columns, ['B'])

        # infer the .typ on subsequent appends
        _maybe_remove(store, 'df')
        store.append('df', df[:5], min_itemsize=200)
        store.append('df', df[5:], min_itemsize=200)
        tm.assert_frame_equal(store['df'], df)

        # invalid min_itemsize keys must be rejected
        df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])
        _maybe_remove(store, 'df')
        self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})
def test_append_with_data_columns(self):
    """data_columns on append: index creation, query-by-data-column,
    string data columns with nans, min_itemsize interaction, the doc
    examples, and panel data_columns (GH5717)."""
    with ensure_clean_store(self.path) as store:
        df = tm.makeTimeDataFrame()
        df.loc[:,'B'].iloc[0] = 1.
        _maybe_remove(store, 'df')
        store.append('df', df[:2], data_columns=['B'])
        store.append('df', df[2:])
        tm.assert_frame_equal(store['df'], df)

        # check that we have indices created on the index and the
        # declared data column
        assert(store._handle.root.df.table.cols.index.is_indexed is True)
        assert(store._handle.root.df.table.cols.B.is_indexed is True)

        # data column searching
        result = store.select('df', [Term('B>0')])
        expected = df[df.B > 0]
        tm.assert_frame_equal(result, expected)

        # data column searching (with an indexable and a data_columns)
        result = store.select(
            'df', [Term('B>0'), Term('index>df.index[3]')])
        df_new = df.reindex(index=df.index[4:])
        expected = df_new[df_new.B > 0]
        tm.assert_frame_equal(result, expected)

        # data column selection with a string data_column
        df_new = df.copy()
        df_new['string'] = 'foo'
        df_new.loc[1:4,'string'] = np.nan
        df_new.loc[5:6,'string'] = 'bar'
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string'])
        result = store.select('df', [Term('string=foo')])
        expected = df_new[df_new.string == 'foo']
        tm.assert_frame_equal(result, expected)

        # using min_itemsize and a data column
        def check_col(key,name,size):
            # verify the itemsize recorded for a column in the on-disk
            # table description
            self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)

    with ensure_clean_store(self.path) as store:
        # min_itemsize as a per-column dict
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string'],
                     min_itemsize={'string': 30})
        check_col('df', 'string', 30)
        # min_itemsize as a scalar
        _maybe_remove(store, 'df')
        store.append(
            'df', df_new, data_columns=['string'], min_itemsize=30)
        check_col('df', 'string', 30)
        # 'values' key also sizes data columns
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string'],
                     min_itemsize={'values': 30})
        check_col('df', 'string', 30)

    with ensure_clean_store(self.path) as store:
        # mixing per-column and 'values' min_itemsize keys
        df_new['string2'] = 'foobarbah'
        df_new['string_block1'] = 'foobarbah1'
        df_new['string_block2'] = 'foobarbah2'
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50})
        check_col('df', 'string', 30)
        check_col('df', 'string2', 40)
        check_col('df', 'values_block_1', 50)

    with ensure_clean_store(self.path) as store:
        # multiple data columns queried together
        df_new = df.copy()
        df_new.ix[0,'A'] = 1.
        df_new.ix[0,'B'] = -1.
        df_new['string'] = 'foo'
        df_new.loc[1:4,'string'] = np.nan
        df_new.loc[5:6,'string'] = 'bar'
        df_new['string2'] = 'foo'
        df_new.loc[2:5,'string2'] = np.nan
        df_new.loc[7:8,'string2'] = 'bar'
        _maybe_remove(store, 'df')
        store.append(
            'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
        result = store.select('df', [Term('string=foo'), Term(
            'string2=foo'), Term('A>0'), Term('B<0')])
        expected = df_new[(df_new.string == 'foo') & (
            df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
        tm.assert_frame_equal(result, expected, check_index_type=False)

        # a query with no matches yields an empty frame
        result = store.select('df', [Term('string=foo'), Term(
            'string2=cool')])
        expected = df_new[(df_new.string == 'foo') & (
            df_new.string2 == 'cool')]
        tm.assert_frame_equal(result, expected, check_index_type=False)

    with ensure_clean_store(self.path) as store:
        # doc example
        df_dc = df.copy()
        df_dc['string'] = 'foo'
        df_dc.ix[4:6, 'string'] = np.nan
        df_dc.ix[7:9, 'string'] = 'bar'
        df_dc['string2'] = 'cool'
        df_dc['datetime'] = Timestamp('20010102')
        df_dc = df_dc.convert_objects()
        df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan

        _maybe_remove(store, 'df_dc')
        store.append('df_dc', df_dc, data_columns=['B', 'C',
                                                   'string', 'string2', 'datetime'])
        result = store.select('df_dc', [Term('B>0')])

        expected = df_dc[df_dc.B > 0]
        tm.assert_frame_equal(result, expected, check_index_type=False)

        result = store.select(
            'df_dc', ['B > 0', 'C > 0', 'string == foo'])
        expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
            df_dc.string == 'foo')]
        tm.assert_frame_equal(result, expected, check_index_type=False)

    with ensure_clean_store(self.path) as store:
        # doc example part 2
        np.random.seed(1234)
        index = date_range('1/1/2000', periods=8)
        df_dc = DataFrame(np.random.randn(8, 3), index=index,
                          columns=['A', 'B', 'C'])
        df_dc['string'] = 'foo'
        df_dc.ix[4:6,'string'] = np.nan
        df_dc.ix[7:9,'string'] = 'bar'
        df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()
        df_dc['string2'] = 'cool'

        # on-disk operations
        store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])

        result = store.select('df_dc', [ Term('B>0') ])
        expected = df_dc[df_dc.B>0]
        tm.assert_frame_equal(result,expected)

        result = store.select('df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
        expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
        tm.assert_frame_equal(result,expected)

    with ensure_clean_store(self.path) as store:
        # panel
        # GH5717 not handling data_columns
        np.random.seed(1234)
        p = tm.makePanel()

        store.append('p1',p)
        tm.assert_panel_equal(store.select('p1'),p)

        store.append('p2',p,data_columns=True)
        tm.assert_panel_equal(store.select('p2'),p)

        result = store.select('p2',where='ItemA>0')
        expected = p.to_frame()
        expected = expected[expected['ItemA']>0]
        tm.assert_frame_equal(result.to_frame(),expected)

        result = store.select('p2',where='ItemA>0 & minor_axis=["A","B"]')
        expected = p.to_frame()
        expected = expected[expected['ItemA']>0]
        expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]
        tm.assert_frame_equal(result.to_frame(),expected)
def test_create_table_index(self):
    """create_table_index: explicit vs automatic index creation, optlevel
    and kind overrides, data-column indexing, index=columns restriction,
    and rejection of non-table storers."""
    with ensure_clean_store(self.path) as store:

        def col(t,column):
            # fetch a PyTables column object from the stored table
            return getattr(store.get_storer(t).table.cols,column)

        # index=False: nothing indexed until create_table_index is called
        wp = tm.makePanel()
        store.append('p5', wp, index=False)
        store.create_table_index('p5', columns=['major_axis'])
        assert(col('p5', 'major_axis').is_indexed is True)
        assert(col('p5', 'minor_axis').is_indexed is False)

        # index=True: all indexables indexed automatically
        store.append('p5i', wp, index=True)
        assert(col('p5i', 'major_axis').is_indexed is True)
        assert(col('p5i', 'minor_axis').is_indexed is True)

        # default optlevels
        store.get_storer('p5').create_index()
        assert(col('p5', 'major_axis').index.optlevel == 6)
        assert(col('p5', 'minor_axis').index.kind == 'medium')

        # let's change the indexing scheme; calling with no overrides
        # keeps the existing settings
        store.create_table_index('p5')
        assert(col('p5', 'major_axis').index.optlevel == 6)
        assert(col('p5', 'minor_axis').index.kind == 'medium')
        store.create_table_index('p5', optlevel=9)
        assert(col('p5', 'major_axis').index.optlevel == 9)
        assert(col('p5', 'minor_axis').index.kind == 'medium')
        store.create_table_index('p5', kind='full')
        assert(col('p5', 'major_axis').index.optlevel == 9)
        assert(col('p5', 'minor_axis').index.kind == 'full')
        store.create_table_index('p5', optlevel=1, kind='light')
        assert(col('p5', 'major_axis').index.optlevel == 1)
        assert(col('p5', 'minor_axis').index.kind == 'light')

        # data columns are indexed by default
        df = tm.makeTimeDataFrame()
        df['string'] = 'foo'
        df['string2'] = 'bar'
        store.append('f', df, data_columns=['string', 'string2'])
        assert(col('f', 'index').is_indexed is True)
        assert(col('f', 'string').is_indexed is True)
        assert(col('f', 'string2').is_indexed is True)

        # specify index=columns restricts which columns get an index
        store.append(
            'f2', df, index=['string'], data_columns=['string', 'string2'])
        assert(col('f2', 'index').is_indexed is False)
        assert(col('f2', 'string').is_indexed is True)
        assert(col('f2', 'string2').is_indexed is False)

        # try to index a non-table (fixed format) -> TypeError
        _maybe_remove(store, 'f2')
        store.put('f2', df)
        self.assertRaises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
    """Appending a panel whose items are ordered differently from the
    stored table must raise a ValueError."""
    panel = tm.makePanel()
    first_half = panel.ix[:, :10, :]
    # same data, but items reversed relative to the stored layout
    reordered_rest = panel.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]

    with ensure_clean_store(self.path) as store:
        store.put('panel', first_half, format='table')
        self.assertRaises(ValueError, store.put, 'panel',
                          reordered_rest, append=True)
def test_append_hierarchical(self):
    """MultiIndex frames round-trip through append/select, including
    column subsetting (GH 3748) and the to_hdf/read_hdf path."""
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['foo', 'bar'])
    df = DataFrame(np.random.randn(10, 3), index=index,
                   columns=['A', 'B', 'C'])

    with ensure_clean_store(self.path) as store:
        store.append('mi', df)
        result = store.select('mi')
        tm.assert_frame_equal(result, df)

        # GH 3748: column subsetting on a MultiIndex frame
        result = store.select('mi',columns=['A','B'])
        expected = df.reindex(columns=['A','B'])
        tm.assert_frame_equal(result,expected)

    # same subsetting via the top-level to_hdf/read_hdf API
    with ensure_clean_path('test.hdf') as path:
        df.to_hdf(path,'df',format='table')
        result = read_hdf(path,'df',columns=['A','B'])
        expected = df.reindex(columns=['A','B'])
        tm.assert_frame_equal(result,expected)
def test_column_multiindex(self):
    """GH 4710: MultiIndex *columns* must be recreated properly; table
    format rejects data_columns with MultiIndex columns; appending on an
    existing table works (GH 6167); named column Index round-trips."""
    index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])
    df = DataFrame(np.arange(12).reshape(3,4), columns=index)

    with ensure_clean_store(self.path) as store:

        store.put('df',df)
        tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)

        store.put('df1',df,format='table')
        tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)

        # data_columns are incompatible with MultiIndex columns
        self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])
        self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)

    # appending multi-column on existing table (see GH 6167)
    with ensure_clean_store(self.path) as store:
        store.append('df2', df)
        store.append('df2', df)

        tm.assert_frame_equal(store['df2'], concat((df,df)))

    # non_index_axes name: a named flat column Index must survive
    df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))

    with ensure_clean_store(self.path) as store:

        store.put('df1',df,format='table')
        tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
def test_store_multiindex(self):
    """GH 5527: validate MultiIndex level names on append — missing names
    are filled, duplicate names (with columns or within levels) raise."""
    with ensure_clean_store(self.path) as store:

        def make_index(names=None):
            # a 12-row (date, s, t) MultiIndex with optional level names
            return MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],
                                          names=names)

        # no names
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())
        store.append('df',df)
        tm.assert_frame_equal(store.select('df'),df)

        # partial names
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))
        store.append('df',df)
        tm.assert_frame_equal(store.select('df'),df)

        # series: unnamed levels come back as level_1, level_2
        _maybe_remove(store, 's')
        s = Series(np.zeros(12), index=make_index(['date', None, None]))
        store.append('s',s)
        xp = Series(np.zeros(12), index=make_index(['date', 'level_1', 'level_2']))
        tm.assert_series_equal(store.select('s'), xp)

        # dup with column name is rejected
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))
        self.assertRaises(ValueError, store.append, 'df',df)

        # dup within level names is rejected
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))
        self.assertRaises(ValueError, store.append, 'df',df)

        # fully named levels round-trip
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))
        store.append('df',df)
        tm.assert_frame_equal(store.select('df'),df)
def test_select_columns_in_where(self):
    """GH 6169: passing columns through the ``where`` argument must
    recreate MultiIndex level names correctly."""
    mi = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                            ['one', 'two', 'three']],
                    labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                            [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                    names=['foo_name', 'bar_name'])

    # With a DataFrame
    frame = DataFrame(np.random.randn(10, 3), index=mi,
                      columns=['A', 'B', 'C'])
    with ensure_clean_store(self.path) as store:
        store.put('df', frame, format='table')
        expected = frame[['A']]
        # columns= keyword and the where-string spelling must agree
        tm.assert_frame_equal(store.select('df', columns=['A']), expected)
        tm.assert_frame_equal(
            store.select('df', where="columns=['A']"), expected)

    # With a Series
    series = Series(np.random.randn(10), index=mi, name='A')
    with ensure_clean_store(self.path) as store:
        store.put('s', series, format='table')
        tm.assert_series_equal(
            store.select('s', where="columns=['A']"), series)
def test_pass_spec_to_storer(self):
    """Fixed-format (non-table) storers reject table-only select options
    such as ``columns`` and ``where``."""
    frame = tm.makeDataFrame()

    with ensure_clean_store(self.path) as store:
        store.put('df', frame)
        # column subsetting is a table-only feature
        self.assertRaises(TypeError, store.select, 'df', columns=['A'])
        # so is a where clause
        self.assertRaises(TypeError, store.select, 'df',
                          where=[('columns=A')])
def test_append_misc(self):
    """Miscellaneous append behavior: unsupported types for put,
    chunksize/expectedrows options, chunked appends of mixed-dtype
    objects, and empty-frame/panel handling (GH4273)."""
    with ensure_clean_store(self.path) as store:

        # unsupported data types for non-tables
        p4d = tm.makePanel4D()
        self.assertRaises(TypeError, store.put,'p4d',p4d)

        # unsupported data types
        self.assertRaises(TypeError, store.put,'abc',None)
        self.assertRaises(TypeError, store.put,'abc','123')
        self.assertRaises(TypeError, store.put,'abc',123)
        self.assertRaises(TypeError, store.put,'abc',np.arange(5))

        df = tm.makeDataFrame()
        store.append('df', df, chunksize=1)
        result = store.select('df')
        tm.assert_frame_equal(result, df)

        store.append('df1', df, expectedrows=10)
        result = store.select('df1')
        tm.assert_frame_equal(result, df)

    # more chunksize in append tests
    def check(obj, comparator):
        # round-trip obj at several chunksizes and compare
        for c in [10, 200, 1000]:
            with ensure_clean_store(self.path,mode='w') as store:
                store.append('obj', obj, chunksize=c)
                result = store.select('obj')
                comparator(result,obj)

    df = tm.makeDataFrame()
    df['string'] = 'foo'
    df['float322'] = 1.
    df['float322'] = df['float322'].astype('float32')
    df['bool'] = df['float322'] > 0
    df['time1'] = Timestamp('20130101')
    df['time2'] = Timestamp('20130102')
    check(df, tm.assert_frame_equal)

    p = tm.makePanel()
    check(p, assert_panel_equal)

    p4d = tm.makePanel4D()
    check(p4d, assert_panel4d_equal)

    # empty frame, GH4273
    with ensure_clean_store(self.path) as store:

        # 0 len: appending an empty frame creates no node
        df_empty = DataFrame(columns=list('ABC'))
        store.append('df',df_empty)
        self.assertRaises(KeyError,store.select, 'df')

        # repeated append of 0/non-zero frames
        df = DataFrame(np.random.rand(10,3),columns=list('ABC'))
        store.append('df',df)
        assert_frame_equal(store.select('df'),df)
        store.append('df',df_empty)
        assert_frame_equal(store.select('df'),df)

        # store an empty frame via put
        df = DataFrame(columns=list('ABC'))
        store.put('df2',df)
        assert_frame_equal(store.select('df2'),df)

        # 0 len panel creates no node either
        p_empty = Panel(items=list('ABC'))
        store.append('p',p_empty)
        self.assertRaises(KeyError,store.select, 'p')

        # repeated append of 0/non-zero panels
        p = Panel(np.random.randn(3,4,5),items=list('ABC'))
        store.append('p',p)
        assert_panel_equal(store.select('p'),p)
        store.append('p',p_empty)
        assert_panel_equal(store.select('p'),p)

        # store an empty panel via put
        store.put('p2',p_empty)
        assert_panel_equal(store.select('p2'),p_empty)
def test_append_raise(self):
    """Invalid inputs to append must raise with useful error messages."""
    with ensure_clean_store(self.path) as store:

        # test append with invalid input to get good error messages

        # list in column: object dtype holding lists is unsupported
        df = tm.makeDataFrame()
        df['invalid'] = [['a']] * len(df)
        self.assertEqual(df.dtypes['invalid'], np.object_)
        self.assertRaises(TypeError, store.append,'df',df)

        # multiple invalid columns
        df['invalid2'] = [['a']] * len(df)
        df['invalid3'] = [['a']] * len(df)
        self.assertRaises(TypeError, store.append,'df',df)

        # datetime with embedded nans as object dtype
        df = tm.makeDataFrame()
        s = Series(datetime.datetime(2001,1,2),index=df.index)
        s = s.astype(object)
        s[0:5] = np.nan
        df['invalid'] = s
        self.assertEqual(df.dtypes['invalid'], np.object_)
        self.assertRaises(TypeError, store.append,'df', df)

        # directly appending an ndarray is rejected
        self.assertRaises(TypeError, store.append,'df',np.arange(10))

        # a bare Series where a frame is stored is rejected
        self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))

        # appending an incompatible table (extra column) is rejected
        df = tm.makeDataFrame()
        store.append('df',df)
        df['foo'] = 'foo'
        self.assertRaises(ValueError, store.append,'df',df)
def test_table_index_incompatible_dtypes(self):
    """Appending a frame whose index dtype conflicts with the stored
    table's index must raise a TypeError."""
    int_indexed = DataFrame({'a': [1, 2, 3]})
    date_indexed = DataFrame({'a': [4, 5, 6]},
                             index=date_range('1/1/2000', periods=3))

    with ensure_clean_store(self.path) as store:
        store.put('frame', int_indexed, format='table')
        # integer index on disk vs incoming datetime index
        self.assertRaises(TypeError, store.put, 'frame', date_indexed,
                          format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
    """Value dtypes (f8, i8, f4, and a mixed set) must be preserved
    through a table round-trip; incompatible dtype appends must raise."""
    with ensure_clean_store(self.path) as store:
        df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
        store.append('df_f8', df1)
        assert_series_equal(df1.dtypes,store['df_f8'].dtypes)

        df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
        store.append('df_i8', df2)
        assert_series_equal(df2.dtypes,store['df_i8'].dtypes)

        # incompatible dtype: appending f8 onto an i8 table
        self.assertRaises(ValueError, store.append, 'df_i8', df1)

        # check creation/storage/retrieval of float32 (a bit hacky to actually create them though)
        df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])
        store.append('df_f4', df1)
        assert_series_equal(df1.dtypes,store['df_f4'].dtypes)
        assert df1.dtypes[0] == 'float32'

        # check with mixed dtypes
        df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in
                               ['float32','float64','int32','int64','int16','int8'] ]))
        df1['string'] = 'foo'
        df1['float322'] = 1.
        df1['float322'] = df1['float322'].astype('float32')
        df1['bool'] = df1['float32'] > 0
        df1['time1'] = Timestamp('20130101')
        df1['time2'] = Timestamp('20130102')

        store.append('df_mixed_dtypes1', df1)
        result = store.select('df_mixed_dtypes1').get_dtype_counts()
        expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1,
                            'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1,
                            'datetime64[ns]' : 2})
        result.sort()
        expected.sort()
        tm.assert_series_equal(result,expected)
def test_table_mixed_dtypes(self):
    """Mixed-dtype frames, panels, and 4-dim panels (objects, bools,
    ints, timestamps, datetimes, with some nans) must round-trip through
    a table store."""
    # frame with one of each supported dtype
    df = tm.makeDataFrame()
    df['obj1'] = 'foo'
    df['obj2'] = 'bar'
    df['bool1'] = df['A'] > 0
    df['bool2'] = df['B'] > 0
    df['bool3'] = True
    df['int1'] = 1
    df['int2'] = 2
    df['timestamp1'] = Timestamp('20010102')
    df['timestamp2'] = Timestamp('20010103')
    df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
    df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
    df.ix[3:6, ['obj1']] = np.nan
    df = df.consolidate().convert_objects()

    with ensure_clean_store(self.path) as store:
        store.append('df1_mixed', df)
        tm.assert_frame_equal(store.select('df1_mixed'), df)

    # panel with mixed dtypes
    wp = tm.makePanel()
    wp['obj1'] = 'foo'
    wp['obj2'] = 'bar'
    wp['bool1'] = wp['ItemA'] > 0
    wp['bool2'] = wp['ItemB'] > 0
    wp['int1'] = 1
    wp['int2'] = 2
    wp = wp.consolidate()

    with ensure_clean_store(self.path) as store:
        store.append('p1_mixed', wp)
        assert_panel_equal(store.select('p1_mixed'), wp)

    # 4-dim panel with mixed dtypes
    wp = tm.makePanel4D()
    wp['obj1'] = 'foo'
    wp['obj2'] = 'bar'
    wp['bool1'] = wp['l1'] > 0
    wp['bool2'] = wp['l2'] > 0
    wp['int1'] = 1
    wp['int2'] = 2
    wp = wp.consolidate()

    with ensure_clean_store(self.path) as store:
        store.append('p4d_mixed', wp)
        assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
    """Columns with unsupported dtypes (datetime.date, and unicode on
    py2) must raise a TypeError on append."""
    with ensure_clean_store(self.path) as store:

        l = [('date', datetime.date(2001, 1, 2))]

        # py3 ok for unicode
        if not compat.PY3:
            l.append(('unicode', u('\\u03c3')))

        ### currently not supported dtypes ####
        for n, f in l:
            df = tm.makeDataFrame()
            df[n] = f
            self.assertRaises(
                TypeError, store.append, 'df1_%s' % n, df)

    # frame with a datetime.date buried in the object block
    df = tm.makeDataFrame()
    df['obj1'] = 'foo'
    df['obj2'] = 'bar'
    df['datetime1'] = datetime.date(2001, 1, 2)
    df = df.consolidate().convert_objects()

    with ensure_clean_store(self.path) as store:
        # this fails because we have a date in the object block......
        self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
def compare(a,b):
tm.assert_frame_equal(a,b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
store.append('df_tz',df,data_columns=['A'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# select with tz aware
compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130603',tz='US/Eastern')),index=range(5))
store.append('df_tz',df)
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz',df,data_columns=['A','B'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# can't append with diff timezone
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df',df)
result = store.select('df')
assert_frame_equal(result,df)
_maybe_remove(store, 'df')
store.append('df',df)
result = store.select('df')
assert_frame_equal(result,df)
    def test_calendar_roundtrip_issue(self):
        """GH 8591: a series derived from a CustomBusinessDay calendar
        round-trips in both fixed and table formats."""
        # 8591
        # doc example from tseries holiday section
        weekmask_egypt = 'Sun Mon Tue Wed Thu'
        holidays = ['2012-05-01', datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
        bday_egypt = pandas.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
        dt = datetime.datetime(2013, 4, 30)
        dts = date_range(dt, periods=5, freq=bday_egypt)
        s = (Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
        with ensure_clean_store(self.path) as store:
            store.put('fixed',s)
            result = store.select('fixed')
            assert_series_equal(result, s)
            store.append('table',s)
            result = store.select('table')
            assert_series_equal(result, s)
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
tm._skip_if_no_dateutil()
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues.
from pandas.tslib import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
def compare(a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e, b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
# select with tz aware
compare(store.select('df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130603', tz=gettz('US/Eastern'))), index=range(5))
store.append('df_tz', df)
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('EET'))), index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('CET'))), index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
    def test_store_timezone(self):
        """GH 2852: a datetime.date index must not shift when the data is
        written under one process timezone and read under another.

        Manipulates the TZ environment variable via time.tzset(), so it is
        skipped on windows where tzset is unavailable.
        """
        # GH2852
        # issue storing datetime.date with a timezone as it resets when read back in a new timezone
        # timezone setting not supported on windows
        tm._skip_if_windows()
        import datetime
        import time
        import os
        # original method
        with ensure_clean_store(self.path) as store:
            today = datetime.date(2013,9,10)
            df = DataFrame([1,2,3], index = [today, today, today])
            store['obj1'] = df
            result = store['obj1']
            assert_frame_equal(result, df)
        # with tz setting
        orig_tz = os.environ.get('TZ')
        def setTZ(tz):
            # set (or clear, for tz=None) the process timezone
            if tz is None:
                try:
                    del os.environ['TZ']
                except:
                    pass
            else:
                os.environ['TZ']=tz
                time.tzset()
        try:
            with ensure_clean_store(self.path) as store:
                setTZ('EST5EDT')
                today = datetime.date(2013,9,10)
                df = DataFrame([1,2,3], index = [today, today, today])
                store['obj1'] = df
                setTZ('CST6CDT')
                result = store['obj1']
                assert_frame_equal(result, df)
        finally:
            # always restore the original TZ for subsequent tests
            setTZ(orig_tz)
    def test_append_with_timedelta(self):
        """GH 3577: timedelta64 columns can be appended, selected with
        numeric and string ('-3D', '-500000s') where-clauses, and stored in
        fixed format."""
        # GH 3577
        # append timedelta
        from datetime import timedelta
        df = DataFrame(dict(A = Timestamp('20130101'), B = [ Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ]))
        df['C'] = df['A']-df['B']
        df.ix[3:5,'C'] = np.nan
        with ensure_clean_store(self.path) as store:
            # table
            _maybe_remove(store, 'df')
            store.append('df',df,data_columns=True)
            result = store.select('df')
            assert_frame_equal(result,df)
            result = store.select('df',Term("C<100000"))
            assert_frame_equal(result,df)
            result = store.select('df',Term("C","<",-3*86400))
            assert_frame_equal(result,df.iloc[3:])
            result = store.select('df',"C<'-3D'")
            assert_frame_equal(result,df.iloc[3:])
            # a bit hacky here as we don't really deal with the NaT properly
            result = store.select('df',"C<'-500000s'")
            result = result.dropna(subset=['C'])
            assert_frame_equal(result,df.iloc[6:])
            result = store.select('df',"C<'-3.5D'")
            result = result.iloc[1:]
            assert_frame_equal(result,df.iloc[4:])
            # fixed
            _maybe_remove(store, 'df2')
            store.put('df2',df)
            result = store.select('df2')
            assert_frame_equal(result,df)
    def test_remove(self):
        """Removing keys: plain removal, a KeyError for missing keys,
        path-like keys ('b/foo'), and removal via __delitem__."""
        with ensure_clean_store(self.path) as store:
            ts = tm.makeTimeSeries()
            df = tm.makeDataFrame()
            store['a'] = ts
            store['b'] = df
            _maybe_remove(store, 'a')
            self.assertEqual(len(store), 1)
            tm.assert_frame_equal(df, store['b'])
            _maybe_remove(store, 'b')
            self.assertEqual(len(store), 0)
            # nonexistence
            self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')
            # pathing
            store['a'] = ts
            store['b/foo'] = df
            _maybe_remove(store, 'foo')
            _maybe_remove(store, 'b/foo')
            self.assertEqual(len(store), 1)
            store['a'] = ts
            store['b/foo'] = df
            # removing the parent node removes the child as well
            _maybe_remove(store, 'b')
            self.assertEqual(len(store), 1)
            # __delitem__
            store['a'] = ts
            store['b'] = df
            del store['a']
            del store['b']
            self.assertEqual(len(store), 0)
    def test_remove_where(self):
        """remove() with a where clause: missing key raises KeyError, a
        valid clause deletes matching rows, an empty where deletes the
        whole table, and an invalid clause raises ValueError."""
        with ensure_clean_store(self.path) as store:
            # non-existance
            crit1 = Term('index>foo')
            self.assertRaises(KeyError, store.remove, 'a', [crit1])
            # try to remove non-table (with crit)
            # non-table ok (where = None)
            wp = tm.makePanel(30)
            store.put('wp', wp, format='table')
            store.remove('wp', ["minor_axis=['A', 'D']"])
            rs = store.select('wp')
            expected = wp.reindex(minor_axis=['B', 'C'])
            assert_panel_equal(rs, expected)
            # empty where
            _maybe_remove(store, 'wp')
            store.put('wp', wp, format='table')
            # deleted number (entire table)
            n = store.remove('wp', [])
            self.assertTrue(n == 120)
            # non - empty where
            _maybe_remove(store, 'wp')
            store.put('wp', wp, format='table')
            self.assertRaises(ValueError, store.remove,
                              'wp', ['foo'])
            # selectin non-table with a where
            # store.put('wp2', wp, format='f')
            # self.assertRaises(ValueError, store.remove,
            #                  'wp2', [('column', ['A', 'D'])])
    def test_remove_startstop(self):
        """GH 4835/6177: remove() honors start/stop row bounds, including
        negative offsets, empty ranges, and combination with a where
        clause. Row counts are 4x the major_axis length (4 minor columns)."""
        # GH #4835 and #6177
        with ensure_clean_store(self.path) as store:
            wp = tm.makePanel(30)
            # start
            _maybe_remove(store, 'wp1')
            store.put('wp1', wp, format='t')
            n = store.remove('wp1', start=32)
            self.assertTrue(n == 120-32)
            result = store.select('wp1')
            expected = wp.reindex(major_axis=wp.major_axis[:32//4])
            assert_panel_equal(result, expected)
            _maybe_remove(store, 'wp2')
            store.put('wp2', wp, format='t')
            n = store.remove('wp2', start=-32)
            self.assertTrue(n == 32)
            result = store.select('wp2')
            expected = wp.reindex(major_axis=wp.major_axis[:-32//4])
            assert_panel_equal(result, expected)
            # stop
            _maybe_remove(store, 'wp3')
            store.put('wp3', wp, format='t')
            n = store.remove('wp3', stop=32)
            self.assertTrue(n == 32)
            result = store.select('wp3')
            expected = wp.reindex(major_axis=wp.major_axis[32//4:])
            assert_panel_equal(result, expected)
            _maybe_remove(store, 'wp4')
            store.put('wp4', wp, format='t')
            n = store.remove('wp4', stop=-32)
            self.assertTrue(n == 120-32)
            result = store.select('wp4')
            expected = wp.reindex(major_axis=wp.major_axis[-32//4:])
            assert_panel_equal(result, expected)
            # start n stop
            _maybe_remove(store, 'wp5')
            store.put('wp5', wp, format='t')
            n = store.remove('wp5', start=16, stop=-16)
            self.assertTrue(n == 120-32)
            result = store.select('wp5')
            expected = wp.reindex(major_axis=wp.major_axis[:16//4].union(wp.major_axis[-16//4:]))
            assert_panel_equal(result, expected)
            _maybe_remove(store, 'wp6')
            store.put('wp6', wp, format='t')
            n = store.remove('wp6', start=16, stop=16)
            self.assertTrue(n == 0)
            result = store.select('wp6')
            expected = wp.reindex(major_axis=wp.major_axis)
            assert_panel_equal(result, expected)
            # with where
            _maybe_remove(store, 'wp7')
            date = wp.major_axis.take(np.arange(0,30,3))
            crit = Term('major_axis=date')
            store.put('wp7', wp, format='t')
            n = store.remove('wp7', where=[crit], stop=80)
            self.assertTrue(n == 28)
            result = store.select('wp7')
            expected = wp.reindex(major_axis=wp.major_axis.difference(wp.major_axis[np.arange(0,20,3)]))
            assert_panel_equal(result, expected)
    def test_remove_crit(self):
        """remove() with where criteria: grouped row removal, chained
        removals on different axes, individual rows/slices, and a no-op
        criterion beyond the data range."""
        with ensure_clean_store(self.path) as store:
            wp = tm.makePanel(30)
            # group row removal
            _maybe_remove(store, 'wp3')
            date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
            crit4 = Term('major_axis=date4')
            store.put('wp3', wp, format='t')
            n = store.remove('wp3', where=[crit4])
            self.assertTrue(n == 36)
            result = store.select('wp3')
            expected = wp.reindex(major_axis=wp.major_axis.difference(date4))
            assert_panel_equal(result, expected)
            # upper half
            _maybe_remove(store, 'wp')
            store.put('wp', wp, format='table')
            date = wp.major_axis[len(wp.major_axis) // 2]
            crit1 = Term('major_axis>date')
            crit2 = Term("minor_axis=['A', 'D']")
            n = store.remove('wp', where=[crit1])
            self.assertTrue(n == 56)
            n = store.remove('wp', where=[crit2])
            self.assertTrue(n == 32)
            result = store['wp']
            expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
            assert_panel_equal(result, expected)
            # individual row elements
            _maybe_remove(store, 'wp2')
            store.put('wp2', wp, format='table')
            date1 = wp.major_axis[1:3]
            crit1 = Term('major_axis=date1')
            store.remove('wp2', where=[crit1])
            result = store.select('wp2')
            expected = wp.reindex(major_axis=wp.major_axis.difference(date1))
            assert_panel_equal(result, expected)
            date2 = wp.major_axis[5]
            crit2 = Term('major_axis=date2')
            store.remove('wp2', where=[crit2])
            result = store['wp2']
            expected = wp.reindex(
                major_axis=wp.major_axis.difference(date1).difference(Index([date2])))
            assert_panel_equal(result, expected)
            date3 = [wp.major_axis[7], wp.major_axis[9]]
            crit3 = Term('major_axis=date3')
            store.remove('wp2', where=[crit3])
            result = store['wp2']
            expected = wp.reindex(
                major_axis=wp.major_axis.difference(date1).difference(Index([date2])).difference(Index(date3)))
            assert_panel_equal(result, expected)
            # corners
            _maybe_remove(store, 'wp4')
            store.put('wp4', wp, format='table')
            n = store.remove(
                'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
            result = store.select('wp4')
            assert_panel_equal(result, wp)
    def test_invalid_terms(self):
        """Malformed where/Term expressions raise ValueError/SyntaxError/
        TypeError; data-column references only resolve when the column was
        stored with data_columns=True."""
        with ensure_clean_store(self.path) as store:
            df = tm.makeTimeDataFrame()
            df['string'] = 'foo'
            df.ix[0:4,'string'] = 'bar'
            wp = tm.makePanel()
            p4d = tm.makePanel4D()
            store.put('df', df, format='table')
            store.put('wp', wp, format='table')
            store.put('p4d', p4d, format='table')
            # some invalid terms
            self.assertRaises(ValueError, store.select, 'wp', "minor=['A', 'B']")
            self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114']"])
            self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114', '20121114']"])
            self.assertRaises(TypeError, Term)
            # more invalid
            self.assertRaises(ValueError, store.select, 'df','df.index[3]')
            self.assertRaises(SyntaxError, store.select, 'df','index>')
            self.assertRaises(ValueError, store.select, 'wp', "major_axis<'20000108' & minor_axis['A', 'B']")
        # from the docs
        with ensure_clean_path(self.path) as path:
            dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
            dfq.to_hdf(path,'dfq',format='table',data_columns=True)
            # check ok
            read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']")
            read_hdf(path,'dfq',where="A>0 or C>0")
        # catch the invalid reference
        with ensure_clean_path(self.path) as path:
            dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
            dfq.to_hdf(path,'dfq',format='table')
            self.assertRaises(ValueError, read_hdf, path,'dfq',where="A>0 or C>0")
    def test_terms(self):
        """Term expression parsing: valid string/tuple forms for panel and
        panel4d axes, deprecated dict-based terms (DeprecationWarning),
        lambda rejection, and unary minus/plus handling."""
        with ensure_clean_store(self.path) as store:
            wp = tm.makePanel()
            p4d = tm.makePanel4D()
            wpneg = Panel.fromDict({-1: tm.makeDataFrame(), 0: tm.makeDataFrame(),
                                    1: tm.makeDataFrame()})
            store.put('wp', wp, table=True)
            store.put('p4d', p4d, table=True)
            store.put('wpneg', wpneg, table=True)
            # panel
            result = store.select('wp', [Term(
                'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
            expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
            assert_panel_equal(result, expected)
            # with deprecation
            result = store.select('wp', [Term(
                'major_axis','<',"20000108"), Term("minor_axis=['A', 'B']")])
            expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
            tm.assert_panel_equal(result, expected)
            # p4d
            result = store.select('p4d', [Term('major_axis<"20000108"'),
                                          Term("minor_axis=['A', 'B']"),
                                          Term("items=['ItemA', 'ItemB']")])
            expected = p4d.truncate(after='20000108').reindex(
                minor=['A', 'B'], items=['ItemA', 'ItemB'])
            assert_panel4d_equal(result, expected)
            # back compat invalid terms
            terms = [
                dict(field='major_axis', op='>', value='20121114'),
                [ dict(field='major_axis', op='>', value='20121114') ],
                [ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
                ]
            for t in terms:
                with tm.assert_produces_warning(expected_warning=DeprecationWarning):
                    Term(t)
            # valid terms
            terms = [
                ('major_axis=20121114'),
                ('major_axis>20121114'),
                (("major_axis=['20121114', '20121114']"),),
                ('major_axis=datetime.datetime(2012, 11, 14)'),
                'major_axis> 20121114',
                'major_axis >20121114',
                'major_axis > 20121114',
                (("minor_axis=['A', 'B']"),),
                (("minor_axis=['A', 'B']"),),
                ((("minor_axis==['A', 'B']"),),),
                (("items=['ItemA', 'ItemB']"),),
                ('items=ItemA'),
                ]
            for t in terms:
                store.select('wp', t)
                store.select('p4d', t)
            # valid for p4d only
            terms = [
                (("labels=['l1', 'l2']"),),
                Term("labels=['l1', 'l2']"),
                ]
            for t in terms:
                store.select('p4d', t)
            with tm.assertRaisesRegexp(TypeError, 'Only named functions are supported'):
                store.select('wp', Term('major_axis == (lambda x: x)("20130101")'))
            # check USub node parsing
            res = store.select('wpneg', Term('items == -1'))
            expected = Panel({-1: wpneg[-1]})
            tm.assert_panel_equal(res, expected)
            with tm.assertRaisesRegexp(NotImplementedError,
                                       'Unary addition not supported'):
                store.select('wpneg', Term('items == +1'))
    def test_term_compat(self):
        """Legacy (field, op, value) Term construction still selects and
        removes rows correctly, including datetime comparison values."""
        with ensure_clean_store(self.path) as store:
            wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                       major_axis=date_range('1/1/2000', periods=5),
                       minor_axis=['A', 'B', 'C', 'D'])
            store.append('wp',wp)
            result = store.select('wp', [Term('major_axis>20000102'),
                                         Term('minor_axis', '=', ['A','B']) ])
            expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
            assert_panel_equal(result, expected)
            store.remove('wp', Term('major_axis>20000103'))
            result = store.select('wp')
            expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
            assert_panel_equal(result, expected)
        with ensure_clean_store(self.path) as store:
            wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                       major_axis=date_range('1/1/2000', periods=5),
                       minor_axis=['A', 'B', 'C', 'D'])
            store.append('wp',wp)
            # stringified datetimes
            result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
            expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
            assert_panel_equal(result, expected)
            result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
            expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
            assert_panel_equal(result, expected)
            result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
            expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
            assert_panel_equal(result, expected)
            result = store.select('wp', [Term('minor_axis','=',['A','B'])])
            expected = wp.loc[:,:,['A','B']]
            assert_panel_equal(result, expected)
    def test_backwards_compat_without_term_object(self):
        """Bare tuples in where clauses (pre-Term API) still work but emit
        a DeprecationWarning; results match the Term-based equivalents."""
        with ensure_clean_store(self.path) as store:
            wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                       major_axis=date_range('1/1/2000', periods=5),
                       minor_axis=['A', 'B', 'C', 'D'])
            store.append('wp',wp)
            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
                result = store.select('wp', [('major_axis>20000102'),
                                             ('minor_axis', '=', ['A','B']) ])
            expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
            assert_panel_equal(result, expected)
            store.remove('wp', ('major_axis>20000103'))
            result = store.select('wp')
            expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
            assert_panel_equal(result, expected)
        with ensure_clean_store(self.path) as store:
            wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                       major_axis=date_range('1/1/2000', periods=5),
                       minor_axis=['A', 'B', 'C', 'D'])
            store.append('wp',wp)
            # stringified datetimes
            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
                result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2))])
            expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
            assert_panel_equal(result, expected)
            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
                result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2,0,0))])
            expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
            assert_panel_equal(result, expected)
            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
                result = store.select('wp', [('major_axis','=',[datetime.datetime(2000,1,2,0,0),
                                                                datetime.datetime(2000,1,3,0,0)])])
            expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
            assert_panel_equal(result, expected)
            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
                result = store.select('wp', [('minor_axis','=',['A','B'])])
            expected = wp.loc[:,:,['A','B']]
            assert_panel_equal(result, expected)
    def test_same_name_scoping(self):
        """Where-clause name resolution: 'datetime' must resolve correctly
        whether the local scope holds the module or the class (the
        `from datetime import datetime` shadowing is deliberate)."""
        with ensure_clean_store(self.path) as store:
            import pandas as pd
            df  = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))
            store.put('df', df, table=True)
            expected = df[df.index>pd.Timestamp('20130105')]
            import datetime
            result = store.select('df','index>datetime.datetime(2013,1,5)')
            assert_frame_equal(result,expected)
            from datetime import datetime
            # technically an error, but allow it
            result = store.select('df','index>datetime.datetime(2013,1,5)')
            assert_frame_equal(result,expected)
            result = store.select('df','index>datetime(2013,1,5)')
            assert_frame_equal(result,expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
raise nose.SkipTest('known failer on some windows platforms')
    def test_frame(self):
        """DataFrames with NaNs round-trip in fixed and table formats, with
        and without compression; an unconsolidated frame is reconsolidated
        on read; an empty slice round-trips."""
        df = tm.makeDataFrame()
        # put in some random NAs
        df.values[0, 0] = np.nan
        df.values[5, 3] = np.nan
        self._check_roundtrip_table(df, tm.assert_frame_equal)
        self._check_roundtrip(df, tm.assert_frame_equal)
        self._check_roundtrip_table(df, tm.assert_frame_equal,
                                    compression=True)
        self._check_roundtrip(df, tm.assert_frame_equal,
                              compression=True)
        tdf = tm.makeTimeDataFrame()
        self._check_roundtrip(tdf, tm.assert_frame_equal)
        self._check_roundtrip(tdf, tm.assert_frame_equal,
                              compression=True)
        with ensure_clean_store(self.path) as store:
            # not consolidated
            df['foo'] = np.random.randn(len(df))
            store['df'] = df
            recons = store['df']
            self.assertTrue(recons._data.is_consolidated())
        # empty
        self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assertTrue(recons.index.equals(rng))
self.assertEqual(rng.tz, recons.index.tz)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assertTrue(recons.index.equals(rng))
self.assertEqual(rng.tz, recons.index.tz)
    def test_store_hierarchical(self):
        """MultiIndex frames round-trip (also transposed and as a single
        column), and the index level names are preserved."""
        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                   ['one', 'two', 'three']],
                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                           names=['foo', 'bar'])
        frame = DataFrame(np.random.randn(10, 3), index=index,
                          columns=['A', 'B', 'C'])
        self._check_roundtrip(frame, tm.assert_frame_equal)
        self._check_roundtrip(frame.T, tm.assert_frame_equal)
        self._check_roundtrip(frame['A'], tm.assert_series_equal)
        # check that the names are stored
        with ensure_clean_store(self.path) as store:
            store['frame'] = frame
            recons = store['frame']
            assert(recons.index.names == ('foo', 'bar'))
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
    def test_store_mixed(self):
        """Mixed-dtype frames round-trip, can be overwritten in place, and
        their individual columns (object/bool/int) round-trip as Series,
        with and without compression."""
        def _make_one():
            # build a consolidated frame with object, bool and int columns
            df = tm.makeDataFrame()
            df['obj1'] = 'foo'
            df['obj2'] = 'bar'
            df['bool1'] = df['A'] > 0
            df['bool2'] = df['B'] > 0
            df['int1'] = 1
            df['int2'] = 2
            return df.consolidate()
        df1 = _make_one()
        df2 = _make_one()
        self._check_roundtrip(df1, tm.assert_frame_equal)
        self._check_roundtrip(df2, tm.assert_frame_equal)
        with ensure_clean_store(self.path) as store:
            store['obj'] = df1
            tm.assert_frame_equal(store['obj'], df1)
            store['obj'] = df2
            tm.assert_frame_equal(store['obj'], df2)
            # check that can store Series of all of these types
            self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
            self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
            self._check_roundtrip(df1['int1'], tm.assert_series_equal)
            # try with compression
            self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
                                  compression=True)
            self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
                                  compression=True)
            self._check_roundtrip(df1['int1'], tm.assert_series_equal,
                                  compression=True)
            self._check_roundtrip(df1, tm.assert_frame_equal,
                                  compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, assert_panel_equal)
    def test_select_with_dups(self):
        """Selecting with duplicate column labels: within one dtype, across
        dtypes, and with duplicates on both index and columns; column
        subsetting must preserve the requested order."""
        # single dtypes
        df = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])
        df.index = date_range('20130101 9:30',periods=10,freq='T')
        with ensure_clean_store(self.path) as store:
            store.append('df',df)
            result = store.select('df')
            expected = df
            assert_frame_equal(result,expected,by_blocks=True)
            result = store.select('df',columns=df.columns)
            expected = df
            assert_frame_equal(result,expected,by_blocks=True)
            result = store.select('df',columns=['A'])
            expected = df.loc[:,['A']]
            assert_frame_equal(result,expected)
        # dups accross dtypes
        df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
                     DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
                    axis=1)
        df.index = date_range('20130101 9:30',periods=10,freq='T')
        with ensure_clean_store(self.path) as store:
            store.append('df',df)
            result = store.select('df')
            expected = df
            assert_frame_equal(result,expected,by_blocks=True)
            result = store.select('df',columns=df.columns)
            expected = df
            assert_frame_equal(result,expected,by_blocks=True)
            expected = df.loc[:,['A']]
            result = store.select('df',columns=['A'])
            assert_frame_equal(result,expected,by_blocks=True)
            expected = df.loc[:,['B','A']]
            result = store.select('df',columns=['B','A'])
            assert_frame_equal(result,expected,by_blocks=True)
        # duplicates on both index and columns
        with ensure_clean_store(self.path) as store:
            store.append('df',df)
            store.append('df',df)
            expected = df.loc[:,['B','A']]
            expected = concat([expected, expected])
            result = store.select('df',columns=['B','A'])
            assert_frame_equal(result,expected,by_blocks=True)
def test_wide_table_dups(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
with tm.assert_produces_warning(expected_warning=DuplicateWarning):
recons = store['panel']
assert_panel_equal(recons, wp)
    def test_long(self):
        """A long-format frame (Panel.to_frame) round-trips; compared by
        converting both sides back to panels."""
        def _check(left, right):
            # compare in panel space since long-frame ordering may differ
            assert_panel_equal(left.to_panel(), right.to_panel())
        wp = tm.makePanel()
        self._check_roundtrip(wp.to_frame(), _check)
        # empty
        # self._check_roundtrip(wp.to_frame()[:0], _check)
    def test_longpanel(self):
        """Placeholder: long-panel round-trip is not currently exercised."""
        pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
df = DataFrame(np.random.binomial(n=1, p=.01, size=(1e3, 10))).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1e3)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
def test_select(self):
    """Exercise HDFStore.select on panels and frames: plain selects,
    ``columns=`` restriction, and where clauses on data columns."""
    wp = tm.makePanel()

    with ensure_clean_store(self.path) as store:

        # put/select ok
        _maybe_remove(store, 'wp')
        store.put('wp', wp, format='table')
        store.select('wp')

        # non-table ok (where = None)
        _maybe_remove(store, 'wp')
        store.put('wp2', wp)
        store.select('wp2')

        # selection on the non-indexable with a large number of columns
        wp = Panel(
            np.random.randn(100, 100, 100), items=['Item%03d' % i for i in range(100)],
            major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in range(100)])

        _maybe_remove(store, 'wp')
        store.append('wp', wp)
        # the Term string resolves `items` from the enclosing local scope
        items = ['Item%03d' % i for i in range(80)]
        result = store.select('wp', Term('items=items'))
        expected = wp.reindex(items=items)
        assert_panel_equal(expected, result)

        # selecting non-table with a where
        # self.assertRaises(ValueError, store.select,
        #                  'wp2', ('column', ['A', 'D']))

        # select with columns=
        df = tm.makeTimeDataFrame()
        _maybe_remove(store, 'df')
        store.append('df', df)
        result = store.select('df', columns=['A', 'B'])
        expected = df.reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)

        # equivalently, as a where-style term
        result = store.select('df', [("columns=['A', 'B']")])
        expected = df.reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)

        # with a data column
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['A'])
        result = store.select('df', ['A > 0'], columns=['A', 'B'])
        expected = df[df.A > 0].reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)

        # all a data columns
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=True)
        result = store.select('df', ['A > 0'], columns=['A', 'B'])
        expected = df[df.A > 0].reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)

        # with a data column, but different columns
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['A'])
        result = store.select('df', ['A > 0'], columns=['C', 'D'])
        expected = df[df.A > 0].reindex(columns=['C', 'D'])
        tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
    """Select with where clauses over datetime, bool, int and float data
    columns, including float frames containing NaN values."""

    with ensure_clean_store(self.path) as store:
        # with a Timestamp data column (GH #2637)
        df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300)))
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['ts', 'A'])
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
        expected = df[df.ts >= Timestamp('2012-02-01')]
        tm.assert_frame_equal(expected, result)

        # bool columns (GH #2849)
        df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])
        df['object'] = 'foo'
        df.ix[4:5, 'object'] = 'bar'
        df['boolv'] = df['A'] > 0
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=True)

        # several equivalent spellings of True/False in the query string
        expected = df[df.boolv == True].reindex(columns=['A', 'boolv'])
        for v in [True, 'true', 1]:
            result = store.select('df', Term('boolv == %s' % str(v)), columns=['A', 'boolv'])
            tm.assert_frame_equal(expected, result)

        expected = df[df.boolv == False].reindex(columns=['A', 'boolv'])
        for v in [False, 'false', 0]:
            result = store.select('df', Term('boolv == %s' % str(v)), columns=['A', 'boolv'])
            tm.assert_frame_equal(expected, result)

        # integer index
        df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
        _maybe_remove(store, 'df_int')
        store.append('df_int', df)
        result = store.select(
            'df_int', [Term("index<10"), Term("columns=['A']")])
        expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
        tm.assert_frame_equal(expected, result)

        # float index
        df = DataFrame(dict(A=np.random.rand(
            20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
        _maybe_remove(store, 'df_float')
        store.append('df_float', df)
        result = store.select(
            'df_float', [Term("index<10.0"), Term("columns=['A']")])
        expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
        tm.assert_frame_equal(expected, result)

    with ensure_clean_store(self.path) as store:

        # floats w/o NaN
        df = DataFrame(dict(cols=range(11), values=range(11)), dtype='float64')
        df['cols'] = (df['cols'] + 10).apply(str)

        store.append('df1', df, data_columns=True)
        result = store.select(
            'df1', where='values>2.0')
        expected = df[df['values'] > 2.0]
        tm.assert_frame_equal(expected, result)

        # floats with NaN
        df.iloc[0] = np.nan
        expected = df[df['values'] > 2.0]

        store.append('df2', df, data_columns=True, index=False)
        result = store.select(
            'df2', where='values>2.0')
        tm.assert_frame_equal(expected, result)

        # https://github.com/PyTables/PyTables/issues/282
        # bug in selection when 0th row has a np.nan and an index
        #store.append('df3',df,data_columns=True)
        #result = store.select(
        #    'df3', where='values>2.0')
        #tm.assert_frame_equal(expected, result)

        # not in first position float with NaN ok too
        df = DataFrame(dict(cols=range(11), values=range(11)), dtype='float64')
        df['cols'] = (df['cols'] + 10).apply(str)

        df.iloc[1] = np.nan
        expected = df[df['values'] > 2.0]

        store.append('df4', df, data_columns=True)
        result = store.select(
            'df4', where='values>2.0')
        tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
    """Where clauses whose right-hand side is a large list/range/Index
    resolved from the local scope."""

    with ensure_clean_store(self.path) as store:

        df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
                            A=np.random.randn(300),
                            B=range(300),
                            users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 + ['a%03d' % i for i in range(100)]))
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])

        # regular select
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
        expected = df[df.ts >= Timestamp('2012-02-01')]
        tm.assert_frame_equal(expected, result)

        # small selector
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01') & users=['a','b','c']")])
        expected = df[(df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a', 'b', 'c'])]
        tm.assert_frame_equal(expected, result)

        # big selector along the columns; `selector` is resolved from
        # the local scope by the Term expression
        selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01')"), Term('users=selector')])
        expected = df[(df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector)]
        tm.assert_frame_equal(expected, result)

        selector = range(100, 200)
        result = store.select('df', [Term('B=selector')])
        expected = df[df.B.isin(selector)]
        tm.assert_frame_equal(expected, result)
        self.assertEqual(len(result), 100)

        # big selector along the index
        selector = Index(df.ts[0:100].values)
        result = store.select('df', [Term('ts=selector')])
        expected = df[df.ts.isin(selector.values)]
        tm.assert_frame_equal(expected, result)
        self.assertEqual(len(result), 100)
def test_select_iterator(self):
    """iterator=True / chunksize= selection, via the store and via
    read_hdf, for single and multiple tables."""

    # single table
    with ensure_clean_store(self.path) as store:

        df = tm.makeTimeDataFrame(500)
        _maybe_remove(store, 'df')
        store.append('df', df)

        expected = store.select('df')

        results = [s for s in store.select('df', iterator=True)]
        result = concat(results)
        tm.assert_frame_equal(expected, result)

        # 500 rows / chunksize 100 -> 5 chunks
        results = [s for s in store.select('df', chunksize=100)]
        self.assertEqual(len(results), 5)
        result = concat(results)
        tm.assert_frame_equal(expected, result)

        results = [s for s in store.select('df', chunksize=150)]
        result = concat(results)
        tm.assert_frame_equal(result, expected)

    # non-table nodes cannot be iterated
    with ensure_clean_path(self.path) as path:

        df = tm.makeTimeDataFrame(500)
        df.to_hdf(path, 'df_non_table')
        self.assertRaises(TypeError, read_hdf, path, 'df_non_table', chunksize=100)
        self.assertRaises(TypeError, read_hdf, path, 'df_non_table', iterator=True)

    with ensure_clean_path(self.path) as path:

        df = tm.makeTimeDataFrame(500)
        df.to_hdf(path, 'df', format='table')

        results = [s for s in read_hdf(path, 'df', chunksize=100)]
        result = concat(results)

        self.assertEqual(len(results), 5)
        tm.assert_frame_equal(result, df)
        tm.assert_frame_equal(result, read_hdf(path, 'df'))

    # multiple
    with ensure_clean_store(self.path) as store:

        df1 = tm.makeTimeDataFrame(500)
        store.append('df1', df1, data_columns=True)
        df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x)
        df2['foo'] = 'bar'
        store.append('df2', df2)

        df = concat([df1, df2], axis=1)

        # full selection
        expected = store.select_as_multiple(
            ['df1', 'df2'], selector='df1')
        results = [s for s in store.select_as_multiple(
            ['df1', 'df2'], selector='df1', chunksize=150)]
        result = concat(results)
        tm.assert_frame_equal(expected, result)

        # where selection
        #expected = store.select_as_multiple(
        #    ['df1', 'df2'], where= Term('A>0'), selector='df1')
        #results = []
        #for s in store.select_as_multiple(
        #    ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):
        #    results.append(s)
        #result = concat(results)
        #tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
    # GH 8014
    # using iterator and where clause over a range covering the whole
    # frame — every variant should return the full frame.
    # int, not 1e4: chunk arithmetic needs an integer (and the sibling
    # test_select_iterator_many_empty_frames already uses int(1e4))
    chunksize = int(1e4)

    # no iterator
    with ensure_clean_store(self.path) as store:

        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df', expected)

        beg_dt = expected.index[0]
        end_dt = expected.index[-1]

        # select w/o iteration and no where clause works
        result = store.select('df')
        tm.assert_frame_equal(expected, result)

        # select w/o iterator and where clause, single term, begin
        # of range, works
        where = "index >= '%s'" % beg_dt
        result = store.select('df', where=where)
        tm.assert_frame_equal(expected, result)

        # select w/o iterator and where clause, single term, end
        # of range, works
        where = "index <= '%s'" % end_dt
        result = store.select('df', where=where)
        tm.assert_frame_equal(expected, result)

        # select w/o iterator and where clause, inclusive range,
        # works
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        result = store.select('df', where=where)
        tm.assert_frame_equal(expected, result)

    # with iterator, full range
    with ensure_clean_store(self.path) as store:

        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df', expected)

        beg_dt = expected.index[0]
        end_dt = expected.index[-1]

        # select w/iterator and no where clause works
        results = [s for s in store.select('df', chunksize=chunksize)]
        result = concat(results)
        tm.assert_frame_equal(expected, result)

        # select w/iterator and where clause, single term, begin of range
        where = "index >= '%s'" % beg_dt
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        result = concat(results)
        tm.assert_frame_equal(expected, result)

        # select w/iterator and where clause, single term, end of range
        where = "index <= '%s'" % end_dt
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        result = concat(results)
        tm.assert_frame_equal(expected, result)

        # select w/iterator and where clause, inclusive range
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        result = concat(results)
        tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
    # GH 8014
    # using iterator and where clause over a sub-range of the frame,
    # plus an empty where result.
    # int, not 1e4: chunk arithmetic needs an integer (consistent with
    # test_select_iterator_many_empty_frames)
    chunksize = int(1e4)

    # with iterator, non complete range
    with ensure_clean_store(self.path) as store:

        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df', expected)

        beg_dt = expected.index[1]
        end_dt = expected.index[-2]

        # select w/iterator and where clause, single term, begin of range
        where = "index >= '%s'" % beg_dt
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        result = concat(results)
        rexpected = expected[expected.index >= beg_dt]
        tm.assert_frame_equal(rexpected, result)

        # select w/iterator and where clause, single term, end of range
        where = "index <= '%s'" % end_dt
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        result = concat(results)
        rexpected = expected[expected.index <= end_dt]
        tm.assert_frame_equal(rexpected, result)

        # select w/iterator and where clause, inclusive range
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        result = concat(results)
        rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
        tm.assert_frame_equal(rexpected, result)

    # with iterator, empty where
    with ensure_clean_store(self.path) as store:

        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df', expected)

        end_dt = expected.index[-1]

        # a strictly-greater-than-last where selects nothing
        where = "index > '%s'" % end_dt
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        self.assertEqual(0, len(results))
def test_select_iterator_many_empty_frames(self):
    # GH 8014
    # using iterator and where clause can return many empty
    # frames.
    chunksize = int(1e4)

    # with iterator, range limited to the first chunk
    with ensure_clean_store(self.path) as store:

        expected = tm.makeTimeDataFrame(100000, 'S')
        _maybe_remove(store, 'df')
        store.append('df', expected)

        beg_dt = expected.index[0]
        end_dt = expected.index[chunksize - 1]

        # select w/iterator and where clause, single term, begin of range
        where = "index >= '%s'" % beg_dt
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]
        result = concat(results)
        rexpected = expected[expected.index >= beg_dt]
        tm.assert_frame_equal(rexpected, result)

        # select w/iterator and where clause, single term, end of range
        where = "index <= '%s'" % end_dt
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]

        # only the first chunk matches, so exactly one frame comes back
        tm.assert_equal(1, len(results))
        result = concat(results)
        rexpected = expected[expected.index <= end_dt]
        tm.assert_frame_equal(rexpected, result)

        # select w/iterator and where clause, inclusive range
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]

        # should be 1, is 10
        tm.assert_equal(1, len(results))
        result = concat(results)
        rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
        tm.assert_frame_equal(rexpected, result)

        # select w/iterator and where clause which selects
        # *nothing*.
        #
        # To be consistent with Python idiom I suggest this should
        # return [] e.g. `for e in []: print True` never prints
        # True.
        where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
        results = [s for s in store.select('df', where=where, chunksize=chunksize)]

        # should be []
        tm.assert_equal(0, len(results))
def test_retain_index_attributes(self):
    # GH 3499, losing frequency info on index recreation
    df = DataFrame(dict(A=Series(lrange(3),
                                 index=date_range('2000-1-1', periods=3, freq='H'))))

    with ensure_clean_store(self.path) as store:
        _maybe_remove(store, 'data')
        store.put('data', df, format='table')

        result = store.get('data')
        tm.assert_frame_equal(df, result)

        # freq/tz/name of both axes must survive the round-trip
        for attr in ['freq', 'tz', 'name']:
            for idx in ['index', 'columns']:
                self.assertEqual(getattr(getattr(df, idx), attr, None),
                                 getattr(getattr(result, idx), attr, None))

        # try to append a table with a different frequency
        with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
            df2 = DataFrame(dict(A=Series(lrange(3),
                                          index=date_range('2002-1-1', periods=3, freq='D'))))
            store.append('data', df2)

        # conflicting freqs are dropped rather than kept incorrectly
        self.assertIsNone(store.get_storer('data').info['index']['freq'])

        # this is ok
        _maybe_remove(store, 'df2')
        df2 = DataFrame(dict(A=Series(lrange(3),
                                      index=[Timestamp('20010101'), Timestamp('20010102'), Timestamp('20020101')])))
        store.append('df2', df2)
        df3 = DataFrame(dict(A=Series(lrange(3), index=date_range('2002-1-1', periods=3, freq='D'))))
        store.append('df2', df3)
def test_retain_index_attributes2(self):
    """Same as test_retain_index_attributes but through the to_hdf /
    read_hdf round-trip: conflicting freq or index name warns and the
    attribute is dropped."""

    with ensure_clean_path(self.path) as path:

        # appending with a different freq warns
        with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):

            df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H'))))
            df.to_hdf(path, 'data', mode='w', append=True)
            df2 = DataFrame(dict(A=Series(lrange(3), index=date_range('2002-1-1', periods=3, freq='D'))))
            df2.to_hdf(path, 'data', append=True)

            idx = date_range('2000-1-1', periods=3, freq='H')
            idx.name = 'foo'
            df = DataFrame(dict(A=Series(lrange(3), index=idx)))
            df.to_hdf(path, 'data', mode='w', append=True)

        # index name survives a plain write
        self.assertEqual(read_hdf(path, 'data').index.name, 'foo')

        # appending with a conflicting index name warns and drops the name
        with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):

            idx2 = date_range('2001-1-1', periods=3, freq='H')
            idx2.name = 'bar'
            df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))
            df2.to_hdf(path, 'data', append=True)

        self.assertIsNone(read_hdf(path, 'data').index.name)
def test_panel_select(self):
    # select from a stored Panel using major/minor axis criteria
    panel = tm.makePanel()

    with ensure_clean_store(self.path) as store:
        store.put('wp', panel, format='table')

        # the first Term string resolves `date` from this local scope,
        # so the variable name must stay `date`
        date = panel.major_axis[len(panel.major_axis) // 2]

        result = store.select(
            'wp', [('major_axis>=date'), ("minor_axis=['A', 'D']")])
        expected = panel.truncate(before=date).reindex(minor=['A', 'D'])
        assert_panel_equal(result, expected)

        result = store.select(
            'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
        expected = panel.truncate(before='20000124').reindex(minor=['A', 'B'])
        assert_panel_equal(result, expected)
def test_frame_select(self):
    """Basic frame selection with Term objects; also checks that a Term
    captures local variables into its evaluation scope."""

    df = tm.makeTimeDataFrame()

    with ensure_clean_store(self.path) as store:
        store.put('frame', df, format='table')
        date = df.index[len(df) // 2]

        # the Term expression resolves `date` from the local scope
        crit1 = Term('index>=date')
        self.assertEqual(crit1.env.scope['date'], date)

        crit2 = ("columns=['A', 'D']")
        crit3 = ('columns=A')

        result = store.select('frame', [crit1, crit2])
        expected = df.ix[date:, ['A', 'D']]
        tm.assert_frame_equal(result, expected)

        result = store.select('frame', [crit3])
        expected = df.ix[:, ['A']]
        tm.assert_frame_equal(result, expected)

        # invalid terms
        df = tm.makeTimeDataFrame()
        store.append('df_time', df)
        self.assertRaises(
            ValueError, store.select, 'df_time', [Term("index>0")])

        # can't select if not written as table
        # store['frame'] = df
        # self.assertRaises(ValueError, store.select,
        #                  'frame', [crit1, crit2])
def test_frame_select_complex(self):
    # select via complex criteria: &, |, parenthesised groups, ~ and `in`
    df = tm.makeTimeDataFrame()
    df['string'] = 'foo'
    df.loc[df.index[0:4], 'string'] = 'bar'

    with ensure_clean_store(self.path) as store:
        store.put('df', df, table=True, data_columns=['string'])

        # empty
        result = store.select('df', 'index>df.index[3] & string="bar"')
        expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]
        tm.assert_frame_equal(result, expected)

        result = store.select('df', 'index>df.index[3] & string="foo"')
        expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]
        tm.assert_frame_equal(result, expected)

        # or
        result = store.select('df', 'index>df.index[3] | string="bar"')
        expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]
        tm.assert_frame_equal(result, expected)

        result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string="bar"')
        expected = df.loc[((df.index > df.index[3]) & (df.index <= df.index[6])) | (df.string == 'bar')]
        tm.assert_frame_equal(result, expected)

        # invert
        result = store.select('df', 'string!="bar"')
        expected = df.loc[df.string != 'bar']
        tm.assert_frame_equal(result, expected)

        # invert not implemented in numexpr :(
        self.assertRaises(NotImplementedError, store.select, 'df', '~(string="bar")')

        # invert ok for filters
        result = store.select('df', "~(columns=['A','B'])")
        expected = df.loc[:, df.columns.difference(['A', 'B'])]
        tm.assert_frame_equal(result, expected)

        # in
        result = store.select('df', "index>df.index[3] & columns in ['A','B']")
        expected = df.loc[df.index > df.index[3]].reindex(columns=['A', 'B'])
        tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
    """Non-trivial where clauses whose right-hand side is a list, Index
    or expression resolved from the local scope, via both read_hdf and
    an open HDFStore."""

    with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:

        pp, hh = paths

        # use non-trivial selection criteria
        parms = DataFrame({'A': [1, 1, 2, 2, 3]})
        parms.to_hdf(pp, 'df', mode='w', format='table', data_columns=['A'])

        selection = read_hdf(pp, 'df', where='A=[2,3]')
        hist = DataFrame(np.random.randn(25, 1), columns=['data'],
                         index=MultiIndex.from_tuples([(i, j) for i in range(5) for j in range(5)],
                                                      names=['l1', 'l2']))

        hist.to_hdf(hh, 'df', mode='w', format='table')

        expected = read_hdf(hh, 'df', where=Term('l1', '=', [2, 3, 4]))

        # list like
        result = read_hdf(hh, 'df', where=Term('l1', '=', selection.index.tolist()))
        assert_frame_equal(result, expected)
        l = selection.index.tolist()

        # scope with list like
        store = HDFStore(hh)
        result = store.select('df', where='l1=l')
        assert_frame_equal(result, expected)
        store.close()

        result = read_hdf(hh, 'df', where='l1=l')
        assert_frame_equal(result, expected)

        # index
        index = selection.index
        result = read_hdf(hh, 'df', where='l1=index')
        assert_frame_equal(result, expected)

        result = read_hdf(hh, 'df', where='l1=selection.index')
        assert_frame_equal(result, expected)

        result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
        assert_frame_equal(result, expected)

        result = read_hdf(hh, 'df', where='l1=list(selection.index)')
        assert_frame_equal(result, expected)

        # scope with index
        store = HDFStore(hh)

        result = store.select('df', where='l1=index')
        assert_frame_equal(result, expected)

        result = store.select('df', where='l1=selection.index')
        assert_frame_equal(result, expected)

        result = store.select('df', where='l1=selection.index.tolist()')
        assert_frame_equal(result, expected)

        result = store.select('df', where='l1=list(selection.index)')
        assert_frame_equal(result, expected)

        store.close()
def test_invalid_filtering(self):
    # can't use more than one filter (atm)
    df = tm.makeTimeDataFrame()

    with ensure_clean_store(self.path) as store:
        store.put('df', df, table=True)

        # OR of two column filters is not implemented
        self.assertRaises(NotImplementedError,
                          store.select, 'df', "columns=['A'] | columns=['B']")

        # AND of two column filters could in theory be handled, but is not
        self.assertRaises(NotImplementedError,
                          store.select, 'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
    # GH 2973
    with ensure_clean_store(self.path) as store:

        df = tm.makeTimeDataFrame()

        # test string ==/!=
        df['x'] = 'none'
        df.ix[2:7, 'x'] = ''

        store.append('df', df, data_columns=['x'])

        result = store.select('df', Term('x=none'))
        expected = df[df.x == 'none']
        assert_frame_equal(result, expected)

        # != on strings has been flaky; log diagnostics instead of
        # failing hard (best-effort check, kept as-is)
        try:
            result = store.select('df', Term('x!=none'))
            expected = df[df.x != 'none']
            assert_frame_equal(result, expected)
        except Exception as detail:
            com.pprint_thing("[{0}]".format(detail))
            com.pprint_thing(store)
            com.pprint_thing(expected)

        df2 = df.copy()
        df2.loc[df2.x == '', 'x'] = np.nan

        store.append('df2', df2, data_columns=['x'])
        result = store.select('df2', Term('x!=none'))
        expected = df2[isnull(df2.x)]
        assert_frame_equal(result, expected)

        # int ==/!=
        df['int'] = 1
        df.ix[2:7, 'int'] = 2

        store.append('df3', df, data_columns=['int'])

        result = store.select('df3', Term('int=2'))
        expected = df[df.int == 2]
        assert_frame_equal(result, expected)

        result = store.select('df3', Term('int!=2'))
        expected = df[df.int != 2]
        assert_frame_equal(result, expected)
def test_read_column(self):
    """select_column: errors, index column, data columns, NaN handling,
    start/stop slicing, and column-name preservation (GH 10392)."""

    df = tm.makeTimeDataFrame()

    with ensure_clean_store(self.path) as store:
        _maybe_remove(store, 'df')
        store.append('df', df)

        # error: unknown column
        self.assertRaises(KeyError, store.select_column, 'df', 'foo')

        # error: select_column does not take a where clause
        def f():
            store.select_column('df', 'index', where=['index>5'])
        self.assertRaises(Exception, f)

        # valid
        result = store.select_column('df', 'index')
        tm.assert_almost_equal(result.values, Series(df.index).values)
        self.assertIsInstance(result, Series)

        # not a data indexable column
        self.assertRaises(
            ValueError, store.select_column, 'df', 'values_block_0')

        # a data column
        df2 = df.copy()
        df2['string'] = 'foo'
        store.append('df2', df2, data_columns=['string'])
        result = store.select_column('df2', 'string')
        tm.assert_almost_equal(result.values, df2['string'].values)

        # a data column with NaNs, result excludes the NaNs
        # NOTE(review): despite the comment above, the assertion compares
        # against the full column including NaNs — confirm intended
        df3 = df.copy()
        df3['string'] = 'foo'
        df3.ix[4:6, 'string'] = np.nan
        store.append('df3', df3, data_columns=['string'])
        result = store.select_column('df3', 'string')
        tm.assert_almost_equal(result.values, df3['string'].values)

        # start/stop behave like positional slicing on the column
        result = store.select_column('df3', 'string', start=2)
        tm.assert_almost_equal(result.values, df3['string'].values[2:])

        result = store.select_column('df3', 'string', start=-2)
        tm.assert_almost_equal(result.values, df3['string'].values[-2:])

        result = store.select_column('df3', 'string', stop=2)
        tm.assert_almost_equal(result.values, df3['string'].values[:2])

        result = store.select_column('df3', 'string', stop=-2)
        tm.assert_almost_equal(result.values, df3['string'].values[:-2])

        result = store.select_column('df3', 'string', start=2, stop=-2)
        tm.assert_almost_equal(result.values, df3['string'].values[2:-2])

        result = store.select_column('df3', 'string', start=-2, stop=2)
        tm.assert_almost_equal(result.values, df3['string'].values[-2:2])

        # GH 10392 - make sure column name is preserved
        df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
        store.append('df4', df4, data_columns=True)
        expected = df4['B']
        result = store.select_column('df4', 'B')
        tm.assert_series_equal(result, expected)
def test_coordinates(self):
    """select_as_coordinates and select(where=coordinates/mask/list),
    including multi-table reuse of one coordinate set."""

    df = tm.makeTimeDataFrame()

    with ensure_clean_store(self.path) as store:

        _maybe_remove(store, 'df')
        store.append('df', df)

        # all: coordinates cover every row
        c = store.select_as_coordinates('df')
        assert((c.values == np.arange(len(df.index))).all() == True)

        # get coordinates back & test vs frame
        _maybe_remove(store, 'df')

        df = DataFrame(dict(A=lrange(5), B=lrange(5)))
        store.append('df', df)
        c = store.select_as_coordinates('df', ['index<3'])
        assert((c.values == np.arange(3)).all() == True)
        result = store.select('df', where=c)
        expected = df.ix[0:2, :]
        tm.assert_frame_equal(result, expected)

        c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
        assert((c.values == np.arange(2) + 3).all() == True)
        result = store.select('df', where=c)
        expected = df.ix[3:4, :]
        tm.assert_frame_equal(result, expected)
        self.assertIsInstance(c, Index)

        # multiple tables: coordinates from one table select from another
        _maybe_remove(store, 'df1')
        _maybe_remove(store, 'df2')
        df1 = tm.makeTimeDataFrame()
        df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
        store.append('df1', df1, data_columns=['A', 'B'])
        store.append('df2', df2)

        c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
        df1_result = store.select('df1', c)
        df2_result = store.select('df2', c)
        result = concat([df1_result, df2_result], axis=1)

        expected = concat([df1, df2], axis=1)
        expected = expected[(expected.A > 0) & (expected.B > 0)]
        tm.assert_frame_equal(result, expected)

    # pass array/mask as the coordinates
    with ensure_clean_store(self.path) as store:

        df = DataFrame(np.random.randn(1000, 2), index=date_range('20000101', periods=1000))
        store.append('df', df)
        c = store.select_column('df', 'index')
        where = c[DatetimeIndex(c).month == 5].index
        expected = df.iloc[where]

        # locations
        result = store.select('df', where=where)
        tm.assert_frame_equal(result, expected)

        # boolean
        # NOTE(review): this repeats the locations call above verbatim;
        # presumably a boolean mask was intended here — confirm
        result = store.select('df', where=where)
        tm.assert_frame_equal(result, expected)

        # invalid coordinate specifications raise
        self.assertRaises(ValueError, store.select, 'df', where=np.arange(len(df), dtype='float64'))
        self.assertRaises(ValueError, store.select, 'df', where=np.arange(len(df) + 1))
        self.assertRaises(ValueError, store.select, 'df', where=np.arange(len(df)), start=5)
        self.assertRaises(ValueError, store.select, 'df', where=np.arange(len(df)), start=5, stop=10)

        # selection with filter; `selection` is resolved from local scope
        selection = date_range('20000101', periods=500)
        result = store.select('df', where='index in selection')
        expected = df[df.index.isin(selection)]
        tm.assert_frame_equal(result, expected)

        # list
        df = DataFrame(np.random.randn(10, 2))
        store.append('df2', df)
        result = store.select('df2', where=[0, 3, 5])
        expected = df.iloc[[0, 3, 5]]
        tm.assert_frame_equal(result, expected)

        # boolean
        where = [True] * 10
        where[-2] = False
        result = store.select('df2', where=where)
        expected = df.loc[where]
        tm.assert_frame_equal(result, expected)

        # start/stop
        result = store.select('df2', start=5, stop=10)
        expected = df[5:10]
        tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self):
    # append one frame split across multiple tables, then select it back
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
    df2['foo'] = 'bar'
    combined = concat([df1, df2], axis=1)

    with ensure_clean_store(self.path) as store:

        # exceptions: selector must be one of the target tables, and at
        # most one table may take the remainder (None)
        self.assertRaises(ValueError, store.append_to_multiple,
                          {'df1': ['A', 'B'], 'df2': None}, combined,
                          selector='df3')
        self.assertRaises(ValueError, store.append_to_multiple,
                          {'df1': None, 'df2': None}, combined,
                          selector='df3')
        self.assertRaises(
            ValueError, store.append_to_multiple, 'df1', combined, 'df1')

        # regular operation
        store.append_to_multiple(
            {'df1': ['A', 'B'], 'df2': None}, combined, selector='df1')

        result = store.select_as_multiple(
            ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
        expected = combined[(combined.A > 0) & (combined.B > 0)]
        tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
    """dropna semantics of append_to_multiple: True keeps the per-table
    row indexes synchronized, False does not."""

    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
    df1.ix[1, ['A', 'B']] = np.nan
    df = concat([df1, df2], axis=1)

    with ensure_clean_store(self.path) as store:
        # dropna=True should guarantee rows are synchronized
        store.append_to_multiple(
            {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
            dropna=True)
        result = store.select_as_multiple(['df1', 'df2'])
        expected = df.dropna()
        tm.assert_frame_equal(result, expected)
        tm.assert_index_equal(store.select('df1').index,
                              store.select('df2').index)

        # dropna=False shouldn't synchronize row indexes
        store.append_to_multiple(
            {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
            dropna=False)
        # mismatched row counts make select_as_multiple fail
        self.assertRaises(
            ValueError, store.select_as_multiple, ['df1', 'df2'])
        assert not store.select('df1').index.equals(
            store.select('df2').index)
def test_select_as_multiple(self):
    """select_as_multiple: error cases, single-table equivalence with
    select, multi-table joins, and mismatched row counts."""

    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
    df2['foo'] = 'bar'

    with ensure_clean_store(self.path) as store:

        # no tables stored
        self.assertRaises(Exception, store.select_as_multiple,
                          None, where=['A>0', 'B>0'], selector='df1')

        store.append('df1', df1, data_columns=['A', 'B'])
        store.append('df2', df2)

        # exceptions
        self.assertRaises(Exception, store.select_as_multiple,
                          None, where=['A>0', 'B>0'], selector='df1')
        self.assertRaises(Exception, store.select_as_multiple,
                          [None], where=['A>0', 'B>0'], selector='df1')
        self.assertRaises(KeyError, store.select_as_multiple,
                          ['df1', 'df3'], where=['A>0', 'B>0'], selector='df1')
        self.assertRaises(KeyError, store.select_as_multiple,
                          ['df3'], where=['A>0', 'B>0'], selector='df1')
        self.assertRaises(KeyError, store.select_as_multiple,
                          ['df1', 'df2'], where=['A>0', 'B>0'], selector='df4')

        # default select: single-table forms match plain select
        result = store.select('df1', ['A>0', 'B>0'])
        expected = store.select_as_multiple(
            ['df1'], where=['A>0', 'B>0'], selector='df1')
        tm.assert_frame_equal(result, expected)
        expected = store.select_as_multiple(
            'df1', where=['A>0', 'B>0'], selector='df1')
        tm.assert_frame_equal(result, expected)

        # multiple
        result = store.select_as_multiple(
            ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
        expected = concat([df1, df2], axis=1)
        expected = expected[(expected.A > 0) & (expected.B > 0)]
        tm.assert_frame_equal(result, expected)

        # multiple (diff selector)
        result = store.select_as_multiple(['df1', 'df2'], where=[Term(
            'index>df2.index[4]')], selector='df2')
        expected = concat([df1, df2], axis=1)
        expected = expected[5:]
        tm.assert_frame_equal(result, expected)

        # test exception for diff rows
        store.append('df3', tm.makeTimeDataFrame(nper=50))
        self.assertRaises(ValueError, store.select_as_multiple,
                          ['df1', 'df3'], where=['A>0', 'B>0'], selector='df1')
def test_nan_selection_bug_4858(self):
    # GH 4858; nan selection bug, only works for pytables >= 3.1
    if LooseVersion(tables.__version__) < '3.1.0':
        raise nose.SkipTest('tables version does not support fix for nan selection bug: GH 4858')

    with ensure_clean_store(self.path) as store:

        df = DataFrame(dict(cols=range(6), values=range(6)), dtype='float64')
        df['cols'] = (df['cols'] + 10).apply(str)
        df.iloc[0] = np.nan

        # row 0 holds the NaN, so a values>2.0 select starts at row 3
        expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[3., 4., 5.]), index=[3, 4, 5])

        # write w/o the index on that particular column
        store.append('df', df, data_columns=True, index=['cols'])
        result = store.select('df', where='values>2.0')
        assert_frame_equal(result, expected)
def test_start_stop(self):
    # start/stop row slicing combined with a column restriction
    with ensure_clean_store(self.path) as store:

        df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
        store.append('df', df)

        result = store.select(
            'df', [Term("columns=['A']")], start=0, stop=5)
        expected = df.ix[0:4, ['A']]
        tm.assert_frame_equal(result, expected)

        # an out-of-range slice yields an empty frame, not an error
        result = store.select(
            'df', [Term("columns=['A']")], start=30, stop=40)
        assert len(result) == 0
        assert type(result) == DataFrame
    def test_select_filter_corner(self):
        """Column-filter Terms with many columns, including a strided slice."""
        df = DataFrame(np.random.randn(50, 100))
        df.index = ['%.3d' % c for c in df.index]
        df.columns = ['%.3d' % c for c in df.columns]
        with ensure_clean_store(self.path) as store:
            store.put('frame', df, format='table')
            crit = Term('columns=df.columns[:75]')
            result = store.select('frame', [crit])
            tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])
            crit = Term('columns=df.columns[:75:2]')
            result = store.select('frame', [crit])
            tm.assert_frame_equal(result, df.ix[:, df.columns[:75:2]])
    def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
        """Write *obj* to a fresh store, read it back, and compare with
        *comparator*; when *compression* is truthy, the default compression
        library is enabled on the store."""
        options = {}
        if compression:
            options['complib'] = _default_compressor
        with ensure_clean_store(self.path, 'w', **options) as store:
            store['obj'] = obj
            retrieved = store['obj']
            comparator(retrieved, obj, **kwargs)
    def _check_double_roundtrip(self, obj, comparator, compression=False,
                                **kwargs):
        """Round-trip *obj* twice through the same store, comparing after
        each read-back.

        NOTE(review): unlike ``_check_roundtrip`` this uses
        ``compression or _default_compressor``, so passing ``compression=True``
        (rather than a complib name string) would set ``complib=True`` —
        confirm callers only pass strings or False.
        """
        options = {}
        if compression:
            options['complib'] = compression or _default_compressor
        with ensure_clean_store(self.path, 'w', **options) as store:
            store['obj'] = obj
            retrieved = store['obj']
            comparator(retrieved, obj, **kwargs)
            store['obj'] = retrieved
            again = store['obj']
            comparator(again, obj, **kwargs)
    def _check_roundtrip_table(self, obj, comparator, compression=False):
        """Like ``_check_roundtrip`` but writes *obj* in 'table' format."""
        options = {}
        if compression:
            options['complib'] = _default_compressor
        with ensure_clean_store(self.path, 'w', **options) as store:
            store.put('obj', obj, format='table')
            retrieved = store['obj']
            # sorted_obj = _test_sort(obj)
            comparator(retrieved, obj)
    def test_multiple_open_close(self):
        """HDFStore open/close lifecycle: repr shows CLOSED, ``is_open``
        tracks state, multiple handles to one file behave per the PyTables
        file-open policy, and every op on a closed store raises
        ClosedFileError."""
        # GH 4409, open & close multiple times
        with ensure_clean_path(self.path) as path:
            df = tm.makeDataFrame()
            df.to_hdf(path,'df',mode='w',format='table')
            # single
            store = HDFStore(path)
            self.assertNotIn('CLOSED', str(store))
            self.assertTrue(store.is_open)
            store.close()
            self.assertIn('CLOSED', str(store))
            self.assertFalse(store.is_open)
        with ensure_clean_path(self.path) as path:
            if pytables._table_file_open_policy_is_strict:
                # multiples
                store1 = HDFStore(path)
                def f():
                    HDFStore(path)
                self.assertRaises(ValueError, f)
                store1.close()
            else:
                # multiples
                store1 = HDFStore(path)
                store2 = HDFStore(path)
                self.assertNotIn('CLOSED', str(store1))
                self.assertNotIn('CLOSED', str(store2))
                self.assertTrue(store1.is_open)
                self.assertTrue(store2.is_open)
                store1.close()
                self.assertIn('CLOSED', str(store1))
                self.assertFalse(store1.is_open)
                self.assertNotIn('CLOSED', str(store2))
                self.assertTrue(store2.is_open)
                store2.close()
                self.assertIn('CLOSED', str(store1))
                self.assertIn('CLOSED', str(store2))
                self.assertFalse(store1.is_open)
                self.assertFalse(store2.is_open)
                # nested close
                store = HDFStore(path,mode='w')
                store.append('df',df)
                store2 = HDFStore(path)
                store2.append('df2',df)
                store2.close()
                self.assertIn('CLOSED', str(store2))
                self.assertFalse(store2.is_open)
                store.close()
                self.assertIn('CLOSED', str(store))
                self.assertFalse(store.is_open)
                # double closing
                store = HDFStore(path,mode='w')
                store.append('df', df)
                store2 = HDFStore(path)
                store.close()
                self.assertIn('CLOSED', str(store))
                self.assertFalse(store.is_open)
                store2.close()
                self.assertIn('CLOSED', str(store2))
                self.assertFalse(store2.is_open)
        # ops on a closed store
        with ensure_clean_path(self.path) as path:
            df = tm.makeDataFrame()
            df.to_hdf(path,'df',mode='w',format='table')
            store = HDFStore(path)
            store.close()
            self.assertRaises(ClosedFileError, store.keys)
            self.assertRaises(ClosedFileError, lambda : 'df' in store)
            self.assertRaises(ClosedFileError, lambda : len(store))
            self.assertRaises(ClosedFileError, lambda : store['df'])
            self.assertRaises(ClosedFileError, lambda : store.df)
            self.assertRaises(ClosedFileError, store.select, 'df')
            self.assertRaises(ClosedFileError, store.get, 'df')
            self.assertRaises(ClosedFileError, store.append, 'df2', df)
            self.assertRaises(ClosedFileError, store.put, 'df3', df)
            self.assertRaises(ClosedFileError, store.get_storer, 'df2')
            self.assertRaises(ClosedFileError, store.remove, 'df2')
            def f():
                store.select('df')
            tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)
    def test_pytables_native_read(self):
        """Files written directly by PyTables read back as DataFrames."""
        try:
            store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r')
            d2 = store['detector/readout']
            assert isinstance(d2, DataFrame)
        finally:
            safe_close(store)
        try:
            store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r')
            str(store)
            d1 = store['detector']
            assert isinstance(d1, DataFrame)
        finally:
            safe_close(store)
    def test_legacy_read(self):
        """Stores written by old pandas versions stay readable (smoke test)."""
        try:
            store = HDFStore(tm.get_data_path('legacy_hdf/legacy.h5'), 'r')
            store['a']
            store['b']
            store['c']
            store['d']
        finally:
            safe_close(store)
    def test_legacy_table_read(self):
        """Legacy table-format stores: plain selects, forced legacy-frame
        read, and where-clause selection (with the old-version warning)."""
        # legacy table types
        try:
            store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'r')
            store.select('df1')
            store.select('df2')
            store.select('wp1')
            # force the frame
            store.select('df2', typ='legacy_frame')
            # old version warning
            with tm.assert_produces_warning(expected_warning=IncompatibilityWarning):
                self.assertRaises(
                    Exception, store.select, 'wp1', Term('minor_axis=B'))
                df2 = store.select('df2')
                result = store.select('df2', Term('index>df2.index[2]'))
                expected = df2[df2.index > df2.index[2]]
                assert_frame_equal(expected, result)
        finally:
            safe_close(store)
    def test_legacy_0_10_read(self):
        """Every key of a pandas-0.10-era store can still be selected."""
        # legacy from 0.10
        try:
            store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
            str(store)
            for k in store.keys():
                store.select(k)
        finally:
            safe_close(store)
    def test_legacy_0_11_read(self):
        """A pandas-0.11-era store exposes its frames (incl. MultiIndexed)."""
        # legacy from 0.11
        try:
            path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
            store = HDFStore(tm.get_data_path(path), 'r')
            str(store)
            assert 'df' in store
            assert 'df1' in store
            assert 'mi' in store
            df = store.select('df')
            df1 = store.select('df1')
            mi = store.select('mi')
            assert isinstance(df, DataFrame)
            assert isinstance(df1, DataFrame)
            assert isinstance(mi, DataFrame)
        finally:
            safe_close(store)
    def test_copy(self):
        """HDFStore.copy preserves keys, per-table row counts and
        (when propindexes) column indexes, for both legacy and new files."""
        def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
            # Copy store *f* to a temp file and verify the copy's contents.
            try:
                if f is None:
                    f = tm.get_data_path(os.path.join('legacy_hdf',
                                                      'legacy_0.10.h5'))
                store = HDFStore(f, 'r')
                if new_f is None:
                    import tempfile
                    fd, new_f = tempfile.mkstemp()
                tstore = store.copy(new_f, keys = keys, propindexes = propindexes, **kwargs)
                # check keys
                if keys is None:
                    keys = store.keys()
                self.assertEqual(set(keys), set(tstore.keys()))
                # check indicies & nrows
                for k in tstore.keys():
                    if tstore.get_storer(k).is_table:
                        new_t = tstore.get_storer(k)
                        orig_t = store.get_storer(k)
                        self.assertEqual(orig_t.nrows, new_t.nrows)
                        # check propindixes
                        if propindexes:
                            for a in orig_t.axes:
                                if a.is_indexed:
                                    self.assertTrue(new_t[a.name].is_indexed)
            finally:
                safe_close(store)
                safe_close(tstore)
                try:
                    os.close(fd)
                except:
                    pass
                safe_remove(new_f)
        do_copy()
        do_copy(keys = ['/a','/b','/df1_mixed'])
        do_copy(propindexes = False)
        # new table
        df = tm.makeDataFrame()
        try:
            path = create_tempfile(self.path)
            st = HDFStore(path)
            st.append('df', df, data_columns = ['A'])
            st.close()
            do_copy(f = path)
            do_copy(f = path, propindexes = False)
        finally:
            safe_remove(path)
    def test_legacy_table_write(self):
        """Disabled writer once used to (re)generate the legacy fixture
        files; the code after SkipTest is intentionally unreachable."""
        raise nose.SkipTest("cannot write legacy tables")
        store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
        df = tm.makeDataFrame()
        wp = tm.makePanel()
        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                   ['one', 'two', 'three']],
                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                           names=['foo', 'bar'])
        df = DataFrame(np.random.randn(10, 3), index=index,
                       columns=['A', 'B', 'C'])
        store.append('mi', df)
        df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10))
        store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
        store.append('wp', wp)
        store.close()
    def test_store_datetime_fractional_secs(self):
        """Microsecond precision of a datetime index survives a round-trip."""
        with ensure_clean_store(self.path) as store:
            dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
            series = Series([0], [dt])
            store['a'] = series
            self.assertEqual(store['a'].index[0], dt)
    def test_tseries_indices_series(self):
        """Series with Datetime/Period indexes keep index type and freq."""
        with ensure_clean_store(self.path) as store:
            idx = tm.makeDateIndex(10)
            ser = Series(np.random.randn(len(idx)), idx)
            store['a'] = ser
            result = store['a']
            assert_series_equal(result, ser)
            self.assertEqual(type(result.index), type(ser.index))
            self.assertEqual(result.index.freq, ser.index.freq)
            idx = tm.makePeriodIndex(10)
            ser = Series(np.random.randn(len(idx)), idx)
            store['a'] = ser
            result = store['a']
            assert_series_equal(result, ser)
            self.assertEqual(type(result.index), type(ser.index))
            self.assertEqual(result.index.freq, ser.index.freq)
    def test_tseries_indices_frame(self):
        """DataFrames with Datetime/Period indexes keep index type and freq."""
        with ensure_clean_store(self.path) as store:
            idx = tm.makeDateIndex(10)
            df = DataFrame(np.random.randn(len(idx), 3), index=idx)
            store['a'] = df
            result = store['a']
            assert_frame_equal(result, df)
            self.assertEqual(type(result.index), type(df.index))
            self.assertEqual(result.index.freq, df.index.freq)
            idx = tm.makePeriodIndex(10)
            df = DataFrame(np.random.randn(len(idx), 3), idx)
            store['a'] = df
            result = store['a']
            assert_frame_equal(result, df)
            self.assertEqual(type(result.index), type(df.index))
            self.assertEqual(result.index.freq, df.index.freq)
    def test_tseries_select_index_column(self):
        """select_column('index') preserves tz (None, UTC, US/Eastern) —
        GH7777."""
        # GH7777
        # selecting a UTC datetimeindex column did
        # not preserve UTC tzinfo set before storing
        # check that no tz still works
        rng = date_range('1/1/2000', '1/30/2000')
        frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
        with ensure_clean_store(self.path) as store:
            store.append('frame', frame)
            result = store.select_column('frame', 'index')
            self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
        # check utc
        rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
        frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
        with ensure_clean_store(self.path) as store:
            store.append('frame', frame)
            result = store.select_column('frame', 'index')
            self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
        # double check non-utc
        rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
        frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
        with ensure_clean_store(self.path) as store:
            store.append('frame', frame)
            result = store.select_column('frame', 'index')
            self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
    def test_unicode_index(self):
        """Unicode index values round-trip (emitting a PerformanceWarning)."""
        unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
        def f():
            s = Series(np.random.randn(len(unicode_values)), unicode_values)
            self._check_roundtrip(s, tm.assert_series_equal)
        compat_assert_produces_warning(PerformanceWarning,f)
    def test_store_datetime_mixed(self):
        """A frame mixing int/float/str columns plus a datetime column
        round-trips intact."""
        df = DataFrame(
            {'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
        ts = tm.makeTimeSeries()
        df['d'] = ts.index[:3]
        self._check_roundtrip(df, tm.assert_frame_equal)
    # def test_cant_write_multiindex_table(self):
    #     # for now, #1848
    #     df = DataFrame(np.random.randn(10, 4),
    #                    index=[np.arange(5).repeat(2),
    #                           np.tile(np.arange(2), 5)])
    #     self.assertRaises(Exception, store.put, 'foo', df, format='table')
    def test_append_with_diff_col_name_types_raises_value_error(self):
        """Appending frames whose column-label types differ from the stored
        table's must raise ValueError."""
        df = DataFrame(np.random.randn(10, 1))
        df2 = DataFrame({'a': np.random.randn(10)})
        df3 = DataFrame({(1, 2): np.random.randn(10)})
        df4 = DataFrame({('1', 2): np.random.randn(10)})
        df5 = DataFrame({('1', 2, object): np.random.randn(10)})
        with ensure_clean_store(self.path) as store:
            name = 'df_%s' % tm.rands(10)
            store.append(name, df)
            for d in (df2, df3, df4, df5):
                with tm.assertRaises(ValueError):
                    store.append(name, d)
    def test_query_with_nested_special_character(self):
        """A where-clause string containing '&' is parsed as a literal, not
        as a boolean operator."""
        df = DataFrame({'a': ['a', 'a', 'c', 'b', 'test & test', 'c' , 'b', 'e'],
                        'b': [1, 2, 3, 4, 5, 6, 7, 8]})
        expected = df[df.a == 'test & test']
        with ensure_clean_store(self.path) as store:
            store.append('test', df, format='table', data_columns=True)
            result = store.select('test', 'a = "test & test"')
            tm.assert_frame_equal(expected, result)
    def test_categorical(self):
        """Categorical support: round-trips (ordered/unordered, NaN codes),
        querying on a categorical data column, appends requiring matching
        categories, and recursive removal of category metadata."""
        with ensure_clean_store(self.path) as store:
            # basic
            s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=False))
            store.append('s', s, format='table')
            result = store.select('s')
            tm.assert_series_equal(s, result)
            s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=True))
            store.append('s_ordered', s, format='table')
            result = store.select('s_ordered')
            tm.assert_series_equal(s, result)
            df = DataFrame({"s":s, "vals":[1,2,3,4,5,6]})
            store.append('df', df, format='table')
            result = store.select('df')
            tm.assert_frame_equal(result, df)
            # dtypes
            s = Series([1,1,2,2,3,4,5]).astype('category')
            store.append('si',s)
            result = store.select('si')
            tm.assert_series_equal(result, s)
            s = Series([1,1,np.nan,2,3,4,5]).astype('category')
            store.append('si2',s)
            result = store.select('si2')
            tm.assert_series_equal(result, s)
            # multiple
            df2 = df.copy()
            df2['s2'] = Series(list('abcdefg')).astype('category')
            store.append('df2',df2)
            result = store.select('df2')
            tm.assert_frame_equal(result, df2)
            # make sure the metadata is ok
            self.assertTrue('/df2 ' in str(store))
            self.assertTrue('/df2/meta/values_block_0/meta' in str(store))
            self.assertTrue('/df2/meta/values_block_1/meta' in str(store))
            # unordered
            s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'],ordered=False))
            store.append('s2', s, format='table')
            result = store.select('s2')
            tm.assert_series_equal(result, s)
            # query
            store.append('df3', df, data_columns=['s'])
            expected = df[df.s.isin(['b','c'])]
            result = store.select('df3', where = ['s in ["b","c"]'])
            tm.assert_frame_equal(result, expected)
            expected = df[df.s.isin(['b','c'])]
            result = store.select('df3', where = ['s = ["b","c"]'])
            tm.assert_frame_equal(result, expected)
            expected = df[df.s.isin(['d'])]
            result = store.select('df3', where = ['s in ["d"]'])
            tm.assert_frame_equal(result, expected)
            expected = df[df.s.isin(['f'])]
            result = store.select('df3', where = ['s in ["f"]'])
            tm.assert_frame_equal(result, expected)
            # appending with same categories is ok
            store.append('df3', df)
            df = concat([df,df])
            expected = df[df.s.isin(['b','c'])]
            result = store.select('df3', where = ['s in ["b","c"]'])
            tm.assert_frame_equal(result, expected)
            # appending must have the same categories
            df3 = df.copy()
            df3['s'].cat.remove_unused_categories(inplace=True)
            self.assertRaises(ValueError, lambda : store.append('df3', df3))
            # remove
            # make sure meta data is removed (its a recursive removal so should be)
            result = store.select('df3/meta/s/meta')
            self.assertIsNotNone(result)
            store.remove('df3')
            self.assertRaises(KeyError, lambda : store.select('df3/meta/s/meta'))
    def test_duplicate_column_name(self):
        """Duplicate column names: rejected by 'fixed' format, but
        round-trip correctly through 'table' format."""
        df = DataFrame(columns=["a", "a"], data=[[0, 0]])
        with ensure_clean_path(self.path) as path:
            self.assertRaises(ValueError, df.to_hdf, path, 'df', format='fixed')
            df.to_hdf(path, 'df', format='table')
            other = read_hdf(path, 'df')
            tm.assert_frame_equal(df, other)
            self.assertTrue(df.equals(other))
            self.assertTrue(other.equals(df))
    def test_round_trip_equals(self):
        """A mixed-dtype frame compares equal (both directions) after a
        table-format round-trip (GH 9330)."""
        # GH 9330
        df = DataFrame({"B": [1,2], "A": ["x","y"]})
        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df', format='table')
            other = read_hdf(path, 'df')
            tm.assert_frame_equal(df, other)
            self.assertTrue(df.equals(other))
            self.assertTrue(other.equals(df))
    def test_preserve_timedeltaindex_type(self):
        """Fixed-format stores must preserve TimedeltaIndex (GH9635)."""
        # GH9635
        # Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
        # the type of the index.
        df = DataFrame(np.random.normal(size=(10,5)))
        df.index = timedelta_range(start='0s',periods=10,freq='1s',name='example')
        with ensure_clean_store(self.path) as store:
            store['df'] = df
            assert_frame_equal(store['df'], df)
    def test_colums_multiindex_modified(self):
        """read_hdf must not mutate the caller's ``columns`` list when the
        stored frame is multi-indexed (GH 7212)."""
        # BUG: 7212
        # read_hdf store.select modified the passed columns parameters
        # when multi-indexed.
        df = DataFrame(np.random.rand(4, 5),
                       index=list('abcd'),
                       columns=list('ABCDE'))
        df.index.name = 'letters'
        df = df.set_index(keys='E', append=True)
        data_columns = df.index.names+df.columns.tolist()
        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df',
                      mode='a',
                      append=True,
                      data_columns=data_columns,
                      index=False)
            cols2load = list('BCD')
            cols2load_original = list(cols2load)
            df_loaded = read_hdf(path, 'df', columns=cols2load)
            self.assertTrue(cols2load_original == cols2load)
    def test_to_hdf_with_object_column_names(self):
        """Table format with data_columns only accepts string-like column
        labels; numeric/datetime-like labels must raise (GH9057)."""
        # GH9057
        # Writing HDF5 table format should only work for string-like
        # column types
        types_should_fail = [ tm.makeIntIndex, tm.makeFloatIndex,
                              tm.makeDateIndex, tm.makeTimedeltaIndex,
                              tm.makePeriodIndex ]
        types_should_run = [ tm.makeStringIndex, tm.makeCategoricalIndex ]
        if compat.PY3:
            types_should_run.append(tm.makeUnicodeIndex)
        else:
            types_should_fail.append(tm.makeUnicodeIndex)
        for index in types_should_fail:
            df = DataFrame(np.random.randn(10, 2), columns=index(2))
            with ensure_clean_path(self.path) as path:
                with self.assertRaises(ValueError,
                                       msg="cannot have non-object label DataIndexableCol"):
                    df.to_hdf(path, 'df', format='table', data_columns=True)
        for index in types_should_run:
            df = DataFrame(np.random.randn(10, 2), columns=index(2))
            with ensure_clean_path(self.path) as path:
                df.to_hdf(path, 'df', format='table', data_columns=True)
                result = pd.read_hdf(path, 'df', where="index = [{0}]".format(df.index[0]))
                assert(len(result))
    def test_read_hdf_open_store(self):
        """read_hdf accepts an already-open HDFStore and leaves it open
        (GH10330)."""
        # GH10330
        # No check for non-string path_or-buf, and no test of open store
        df = DataFrame(np.random.rand(4, 5),
                       index=list('abcd'),
                       columns=list('ABCDE'))
        df.index.name = 'letters'
        df = df.set_index(keys='E', append=True)
        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df', mode='w')
            direct = read_hdf(path, 'df')
            store = HDFStore(path, mode='r')
            indirect = read_hdf(store, 'df')
            tm.assert_frame_equal(direct, indirect)
            self.assertTrue(store.is_open)
            store.close()
    def test_read_hdf_iterator(self):
        """read_hdf(iterator=True) returns a TableIterator whose first chunk
        matches a direct read."""
        df = DataFrame(np.random.rand(4, 5),
                       index=list('abcd'),
                       columns=list('ABCDE'))
        df.index.name = 'letters'
        df = df.set_index(keys='E', append=True)
        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df', mode='w', format='t')
            direct = read_hdf(path, 'df')
            iterator = read_hdf(path, 'df', iterator=True)
            self.assertTrue(isinstance(iterator, TableIterator))
            indirect = next(iterator.__iter__())
            tm.assert_frame_equal(direct, indirect)
            iterator.store.close()
    def test_read_hdf_errors(self):
        """read_hdf error paths: missing file, closed store, and an
        unsupported buffer type (a plain file handle)."""
        df = DataFrame(np.random.rand(4, 5),
                       index=list('abcd'),
                       columns=list('ABCDE'))
        with ensure_clean_path(self.path) as path:
            self.assertRaises(IOError, read_hdf, path, 'key')
            df.to_hdf(path, 'df')
            store = HDFStore(path, mode='r')
            store.close()
            self.assertRaises(IOError, read_hdf, store, 'df')
            # a raw file object is not an accepted path_or_buf
            with open(path, mode='r') as store:
                self.assertRaises(NotImplementedError, read_hdf, store, 'df')
    def test_invalid_complib(self):
        """An unknown compression-library name must raise ValueError."""
        df = DataFrame(np.random.rand(4, 5),
                       index=list('abcd'),
                       columns=list('ABCDE'))
        with ensure_clean_path(self.path) as path:
            self.assertRaises(ValueError, df.to_hdf, path, 'df', complib='blosc:zlib')
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
elif isinstance(obj, Panel):
return obj.reindex(major=sorted(obj.major_axis))
else:
raise ValueError('type not supported here')
# Allow running this test module directly under nose.
if __name__ == '__main__':
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| 38.281382 | 146 | 0.540366 |
ace3e50503c8df8fdca2ed8bfec67e795ae2c6e5 | 23,845 | py | Python | parser/structs/vocabs/feature_vocabs.py | Dayitva/Parser-v3 | 45754bb722fabefdb18f67ab4c32a41d24114bca | [
"Apache-2.0"
] | null | null | null | parser/structs/vocabs/feature_vocabs.py | Dayitva/Parser-v3 | 45754bb722fabefdb18f67ab4c32a41d24114bca | [
"Apache-2.0"
] | null | null | null | parser/structs/vocabs/feature_vocabs.py | Dayitva/Parser-v3 | 45754bb722fabefdb18f67ab4c32a41d24114bca | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2017 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import os
import codecs
import re
from collections import Counter, OrderedDict
from collections import defaultdict as DefaultDict
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from parser.structs.vocabs.base_vocabs import BaseVocab
from . import conllu_vocabs as cv
from parser.neural import nn, nonlin, embeddings, classifiers
#***************************************************************
class FeatureVocab(BaseVocab):
""""""
_save_str = 'feats'
#=============================================================
def __init__(self, *args, placeholder_shape=[None,None,None], **kwargs):
""""""
super(FeatureVocab, self).__init__(*args, placeholder_shape=placeholder_shape, **kwargs)
self._counts = DefaultDict(Counter)
self._str2idx = DefaultDict(dict)
self._idx2str = DefaultDict(dict)
self.PAD_STR = self.UNK_STR = self.pad_str
self.PAD_IDX = self.UNK_IDX = 0
if self.keyed:
self.ROOT_STR = 'Yes'
self.ROOT_IDX = 1
self._feats = ['Root']
self._feat_set = {'Root'}
self['Root', self.PAD_STR] = self.PAD_IDX
self['Root', self.ROOT_STR] = self.ROOT_IDX
else:
self.ROOT_STR = self.pad_str
self.ROOT_IDX = 0
self._feats = list()
self._feat_set = set()
#=============================================================
def get_input_tensor(self, embed_keep_prob=None, nonzero_init=True, variable_scope=None, reuse=True):
""""""
embed_keep_prob = 1 if reuse else (embed_keep_prob or self.embed_keep_prob)
layers = []
with tf.variable_scope(variable_scope or self.classname):
for i, feat in enumerate(self._feats):
vs_feat = str(feat).replace('[', '-RSB-').replace(']', '-LSB-')
with tf.variable_scope(vs_feat):
layer = embeddings.token_embedding_lookup(self.getlen(feat), self.embed_size,
self.placeholder[:,:,i],
nonzero_init=nonzero_init,
reuse=reuse)
layers.append(layer)
layer = tf.add_n(layers)
if embed_keep_prob < 1:
layer = self.drop_func(layer, embed_keep_prob)
return layer
#=============================================================
# TODO fix this to compute feature-level F1 rather than token-level accuracy
def get_linear_classifier(self, layer, token_weights, last_output=None, variable_scope=None, reuse=False):
""""""
if last_output:
n_layers = 0
layer = last_output['hidden_layer']
recur_layer = last_output['recur_layer']
else:
n_layers = self.n_layers
recur_layer = layer
hidden_keep_prob = 1 if reuse else self.hidden_keep_prob
with tf.variable_scope(variable_scope or self.classname):
for i in six.moves.range(0, self.n_layers):
with tf.variable_scope('FC-%d' % i):
layer = classifiers.hidden(layer, self.hidden_size,
hidden_func=self.hidden_func,
hidden_keep_prob=hidden_keep_prob)
with tf.variable_scope('Classifier'):
probabilities = []
loss = []
predictions = []
correct_tokens = []
for i, feat in enumerate(self._feats):
vs_feat = str(feat).replace('[', '-RSB-').replace(']', '-LSB-')
with tf.variable_scope(vs_feat):
logits = classifiers.linear_classifier(layer, self.getlen(feat), hidden_keep_prob=hidden_keep_prob)
targets = self.placeholder[:,:,i]
#---------------------------------------------------
# Compute probabilities/cross entropy
# (n x m x c) -> (n x m x c)
probabilities.append(tf.nn.softmax(logits))
# (n x m), (n x m x c), (n x m) -> ()
loss.append(tf.losses.sparse_softmax_cross_entropy(targets, logits, weights=token_weights))
#---------------------------------------------------
# Compute predictions/accuracy
# (n x m x c) -> (n x m)
predictions.append(tf.argmax(logits, axis=-1, output_type=tf.int32))
# (n x m) (*) (n x m) -> (n x m)
correct_tokens.append(nn.equal(targets, predictions[-1]))
# (n x m) x f -> (n x m x f)
predictions = tf.stack(predictions, axis=-1)
# (n x m) x f -> (n x m x f)
correct_tokens = tf.stack(correct_tokens, axis=-1)
# (n x m x f) -> (n x m)
correct_tokens = tf.reduce_prod(correct_tokens, axis=-1) * token_weights
# (n x m) -> (n)
tokens_per_sequence = tf.reduce_sum(token_weights, axis=-1)
# (n x m) -> (n)
correct_tokens_per_sequence = tf.reduce_sum(correct_tokens, axis=-1)
# (n), (n) -> (n)
correct_sequences = nn.equal(tokens_per_sequence, correct_tokens_per_sequence)
#-----------------------------------------------------------
# Populate the output dictionary
outputs = {}
outputs['recur_layer'] = recur_layer
outputs['targets'] = self.placeholder
outputs['probabilities'] = probabilities
outputs['loss'] = tf.add_n(loss)
outputs['predictions'] = predictions
outputs['n_correct_tokens'] = tf.reduce_sum(correct_tokens)
outputs['n_correct_sequences'] = tf.reduce_sum(correct_sequences)
return outputs
#=============================================================
# TODO fix this to compute feature-level F1 rather than token-level accuracy
  def get_bilinear_classifier_with_embeddings(self, layer, embeddings, token_weights, last_output=None, variable_scope=None, reuse=False):
    """Per-feature bilinear classification of tokens against *embeddings*.

    Same loss/accuracy bookkeeping as get_linear_classifier, but logits come
    from a batched bilinear product between the MLP output and the supplied
    embedding tensor.  NOTE(review): the ``last_output`` parameter is
    currently unused here — confirm whether hidden-layer reuse was intended.
    """
    recur_layer = layer
    hidden_keep_prob = 1 if reuse else self.hidden_keep_prob
    with tf.variable_scope(variable_scope or self.classname):
      for i in six.moves.range(0, self.n_layers):
        with tf.variable_scope('FC-%d' % i):
          layer = classifiers.hidden(layer, self.hidden_size,
                                     hidden_func=self.hidden_func,
                                     hidden_keep_prob=hidden_keep_prob)
      with tf.variable_scope('Classifier'):
        probabilities = []
        loss = []
        predictions = []
        correct_tokens = []
        for i, feat in enumerate(self._feats):
          # Escape brackets, which are illegal in TF scope names
          vs_feat = str(feat).replace('[', '-RSB-').replace(']', '-LSB-')
          with tf.variable_scope(vs_feat):
            logits = classifiers.batch_bilinear_classifier(
              layer, embeddings, self.getlen(feat),
              hidden_keep_prob=hidden_keep_prob,
              add_linear=self.add_linear)
            targets = self.placeholder[:,:,i]
            #---------------------------------------------------
            # Compute probabilities/cross entropy
            # (n x m x c) -> (n x m x c)
            probabilities.append(tf.nn.softmax(logits))
            # (n x m), (n x m x c), (n x m) -> ()
            loss.append(tf.losses.sparse_softmax_cross_entropy(targets, logits, weights=token_weights))
            #---------------------------------------------------
            # Compute predictions/accuracy
            # (n x m x c) -> (n x m)
            predictions.append(tf.argmax(logits, axis=-1, output_type=tf.int32))
            # (n x m) (*) (n x m) -> (n x m)
            correct_tokens.append(nn.equal(targets, predictions[-1]))
        # (n x m) x f -> (n x m x f)
        predictions = tf.stack(predictions, axis=-1)
        # (n x m) x f -> (n x m x f)
        correct_tokens = tf.stack(correct_tokens, axis=-1)
        # (n x m x f) -> (n x m); a token is correct only if all feats match
        correct_tokens = tf.reduce_prod(correct_tokens, axis=-1) * token_weights
        # (n x m) -> (n)
        tokens_per_sequence = tf.reduce_sum(token_weights, axis=-1)
        # (n x m) -> (n)
        correct_tokens_per_sequence = tf.reduce_sum(correct_tokens, axis=-1)
        # (n), (n) -> (n)
        correct_sequences = nn.equal(tokens_per_sequence, correct_tokens_per_sequence)
    #-----------------------------------------------------------
    # Populate the output dictionary
    outputs = {}
    outputs['recur_layer'] = recur_layer
    outputs['targets'] = self.placeholder
    outputs['probabilities'] = probabilities
    outputs['loss'] = tf.add_n(loss)
    outputs['predictions'] = predictions
    outputs['n_correct_tokens'] = tf.reduce_sum(correct_tokens)
    outputs['n_correct_sequences'] = tf.reduce_sum(correct_sequences)
    return outputs
#=============================================================
# TODO finish this
def get_bilinear_classifier(self, layer, outputs, token_weights, variable_scope=None, reuse=False):
""""""
recur_layer = layer
hidden_keep_prob = 1 if reuse else self.hidden_keep_prob
with tf.variable_scope(variable_scope or self.classname):
with tf.variable_scope(variable_scope or self.classname):
for i in six.moves.range(0, self.n_layers-1):
with tf.variable_scope('FC-%d' % i):
layer = classifiers.hidden(layer, 2*hidden_size,
hidden_func=self.hidden_func,
hidden_keep_prob=hidden_keep_prob)
with tf.variable_scope('FC-top'):
layers = classifiers.hiddens(layer, 2*[hidden_size],
hidden_func=self.hidden_func,
hidden_keep_prob=hidden_keep_prob)
layer1, layer2 = layers.pop(0), layers.pop(0)
with tf.variable_scope('Classifier'):
probabilities = []
loss = []
predictions = []
correct_tokens = []
for i, feat in enumerate(self._feats):
vs_feat = str(feat).replace('[', '-RSB-').replace(']', '-LSB-')
with tf.variable_scope(vs_feat):
if self.diagonal:
logits = classifiers.diagonal_bilinear_classifier(layer1, layer2, self.getlen(feat),
hidden_keep_prob=hidden_keep_prob,
add_linear=self.add_linear)
else:
logits = classifiers.bilinear_classifier(layer1, layer2, self.getlen(feat),
hidden_keep_prob=hidden_keep_prob,
add_linear=self.add_linear)
targets = self.placeholder[:,:,i]
#---------------------------------------------------
# Compute probabilities/cross entropy
# (n x m x c) -> (n x m x c)
probabilities.append(tf.nn.softmax(logits))
# (n x m), (n x m x c), (n x m) -> ()
loss.append(tf.losses.sparse_softmax_cross_entropy(targets, logits, weights=token_weights))
#---------------------------------------------------
# Compute predictions/accuracy
# (n x m x c) -> (n x m)
predictions.append(tf.argmax(logits, axis=-1, output_type=tf.int32))
# (n x m) (*) (n x m) -> (n x m)
correct_tokens.append(nn.equal(targets, predictions[-1]))
# (n x m) x f -> (n x m x f)
predictions = tf.stack(predictions, axis=-1)
# (n x m) x f -> (n x m x f)
correct_tokens = tf.stack(correct_tokens, axis=-1)
# (n x m x f) -> (n x m)
correct_tokens = tf.reduce_prod(correct_tokens, axis=-1) * token_weights
# (n x m) -> (n)
tokens_per_sequence = tf.reduce_sum(token_weights, axis=-1)
# (n x m) -> (n)
correct_tokens_per_sequence = tf.reduce_sum(correct_tokens, axis=-1)
# (n), (n) -> (n)
correct_sequences = nn.equal(tokens_per_sequence, correct_tokens_per_sequence)
#-----------------------------------------------------------
# Populate the output dictionary
outputs = {}
outputs['recur_layer'] = recur_layer
outputs['targets'] = self.placeholder
outputs['probabilities'] = probabilities
outputs['loss'] = tf.add_n(loss)
outputs['predictions'] = predictions
outputs['n_correct_tokens'] = tf.reduce_sum(correct_tokens)
outputs['n_correct_sequences'] = tf.reduce_sum(correct_sequences)
return outputs
#=============================================================
def getlen(self, feat):
""""""
return len(self._str2idx[feat])
#=============================================================
# TODO make this compatible with zipped files
  def count(self, train_conllus):
    """Tally feature values over the given CoNLL-U files, then build the
    index tables; returns True on completion."""
    for train_conllu in train_conllus:
      with codecs.open(train_conllu, encoding='utf-8', errors='ignore') as f:
        for line in f:
          line = line.strip()
          # skip blank lines and CoNLL-U comment lines
          if line and not line.startswith('#'):
            line = line.split('\t')
            multitoken = line[self.conllu_idx] # conllu_idx is provided by the CoNLLUVocab
            self._count(multitoken)
    self.index_by_counts()
    return True
  def _count(self, multitoken):
    """Update per-feature counters from one raw feature string.

    *multitoken* is either key=value pairs (keyed vocabs, e.g.
    'Case=Nom|Number=Sing') or positional values; '_' marks an empty slot.
    """
    if not self.cased:
      multitoken = multitoken.lower()
    if self.separator:
      multitoken = multitoken.split(self.separator)
    else:
      multitoken = list(multitoken)
    for i, token in enumerate(multitoken):
      if token != '_':
        # cliplen (when set) bounds how many positional features are kept
        if self.keyed or not self.cliplen or i < self.cliplen:
          if self.keyed:
            feat, token = token.split('=')
          else:
            feat = str(i)
          # register the feature key the first time it is seen
          if feat not in self._feat_set:
            self._feats.append(feat)
            self._feat_set.add(feat)
          if token != self.PAD_STR:
            self._counts[feat][token] += 1
    #if token != self.PAD_STR:
    #  if feat not in self._feat_set:
    #    self._feats.append(feat)
    #    self._feat_set.add(feat)
    #  self._counts[feat][token] += 1
    return
#=============================================================
  def add(self, token):
    """Alias for index(); count-based vocabs add tokens by indexing them."""
    return self.index(token)
#=============================================================
  def token(self, index):
    """Convert a sequence of integer indices back to its feature string."""
    assert isinstance(index[0], six.integer_types + (np.int32, np.int64))
    return self[index]
#=============================================================
  def index(self, multitoken):
    """Convert a raw feature string to its tuple of integer indices."""
    assert isinstance(multitoken, six.string_types), 'FeatureVocab.index was passed {}'.format(multitoken)
    return self[multitoken]
#=============================================================
def get_root(self):
""""""
return 'Root=Yes' if self.keyed else self.separator.join([self.ROOT_STR for _ in self._feats])
#=============================================================
@staticmethod
def sorted(counter):
return sorted(counter.most_common(), key=lambda x: (-x[1], x[0]))
#=============================================================
  def index_by_counts(self, dump=True):
    """Assign integer indices to tokens, most frequent first.

    Index 0 is reserved for PAD_STR in every feature.  Tokens below
    min_occur_count or beyond max_embed_count are skipped (0 disables the
    respective limit).  Feature order is normalized (lexical for keyed
    vocabs, numeric for positional ones) and the vocab is dumped to disk
    unless dump is False.
    """
    for feat in self._feats:
      self[feat, self.PAD_STR] = 0
      cur_idx = 1
      counter = self._counts[feat]
      for token, count in self.sorted(counter):
        if (not self.min_occur_count or count >= self.min_occur_count) and\
           (not self.max_embed_count or cur_idx < self.max_embed_count+1):
          self[feat, token] = cur_idx
          cur_idx += 1
    # freeze the (default)dicts into plain dicts
    self._str2idx = dict(self._str2idx)
    self._idx2str = dict(self._idx2str)
    self._depth = len(self)
    if self.keyed:
      self._feats.sort()
    else:
      # positional feature names are stringified ints; sort numerically
      self._feats = [int(feat) for feat in self._feats]
      self._feats.sort()
      self._feats = [str(feat) for feat in self._feats]
    if dump:
      self.dump()
    return
#=============================================================
  def dump(self):
    """Write counts to vocab_savename: '[feature]' headers followed by
    'token<TAB>count' lines (one blank line between features).  The 'Root'
    feature is never written."""
    with codecs.open(self.vocab_savename, 'w', encoding='utf-8', errors='ignore') as f:
      for feat in self._feats:
        counter = self._counts[feat]
        if feat != 'Root':
          f.write(u'[{}]\n'.format(feat))
          for token, count in self.sorted(counter):
            f.write(u'{}\t{}\n'.format(token, count))
          f.write(u'\n')
    return
#=============================================================
def load(self):
""""""
# First check to see if it's saved in the save_dir, then somewhere else
dump = None
if os.path.exists(self.vocab_savename):
vocab_filename = self.vocab_savename
dump = False
elif self.vocab_loadname and os.path.exists(self.vocab_loadname):
vocab_filename = self.vocab_loadname
dump = True
else:
self._loaded = False
return False
with codecs.open(vocab_filename, encoding='utf-8', errors='ignore') as f:
for line in f:
line = line.rstrip()
if line:
featmatch = re.match('\[(.*)\]$', line) # matches '[feature]'
match = re.match('(.*)\s([0-9]+)', line) # matches 'value count'
if featmatch:
feat = featmatch.group(1)
if feat != 'Root':
self._feats.append(feat)
self._counts[feat] = Counter()
elif match:
token = match.group(1)
count = int(match.group(2))
if feat != 'Root':
self._counts[feat][token] = count
self.index_by_counts(dump=dump)
self._loaded = True
return True
#=============================================================
  def __getitem__(self, key):
    """Convert between string form and index-vector form.

    * str key: returns a list of indices, one per known feature ('_' maps
      every feature to PAD_IDX).
    * sequence of ints: returns the joined string form ('_' when all
      indices are PAD/zero).
    * anything else iterable: applied elementwise.
    """
    assert hasattr(key, '__iter__'), 'You gave FeatureVocab.__getitem__ {}'.format(key)
    if isinstance(key, six.string_types):
      if not self.cased:
        key = key.lower()
      if key == '_':
        return [self.PAD_IDX for _ in self._feats]
      if self.separator:
        multitoken = key.split(self.separator)
      else:
        multitoken = list(key)
      if self.keyed:
        # 'Feat=Value' pairs: look each feature up, PAD when absent,
        # falling back to the 'Root' sub-vocab for unknown features
        key_dict = {}
        for token in multitoken:
          feat, token = token.split('=')
          key_dict[feat] = token
        return [self._str2idx.get(feat, self._str2idx['Root']).get(key_dict[feat], self.UNK_IDX) if feat in key_dict else self.PAD_IDX for feat in self._feats]
      else:
        if self.cliplen:
          multitoken = multitoken[:self.cliplen]
        return [self._str2idx[str(feat)].get(key, self.UNK_IDX) for feat, key in enumerate(multitoken)]
    elif isinstance(key[0], six.integer_types + (np.int32, np.int64)):
      if self.keyed:
        multitoken = ['{}={}'.format(feat, self._idx2str.get(feat, self._idx2str['Root']).get(key, self.UNK_STR)) for feat, key in zip(self._feats, key) if key != self.PAD_IDX]
      else:
        multitoken = [self._idx2str[str(feat)].get(key, self.UNK_STR) for feat, key in enumerate(key)]
      # an all-zero index vector decodes to the placeholder token
      if np.sum(key) > 0:
        return self.separator.join(multitoken)
      else:
        return '_'
    else:
      return [self[k] for k in key]
def __setitem__(self, key, value):
if len(key) == 1:
assert len(value) == 1
self[key[0]] = value[0]
return
if isinstance(key[1], six.string_types):
vocab, key = key
if not self.cased and key != self.PAD_STR:
key = key.lower()
self._str2idx[vocab][key] = value
self._idx2str[vocab][value] = key
elif isinstance(key, six.integer_types + (np.int32, np.int64)):
vocab, key = key
if not self.cased and value != self.PAD_STR:
value = value.lower()
self._idx2str[vocab][key] = value
self._str2idx[vocab][value] = key
elif hasattr(key, '__iter__') and hasattr(value, '__iter__'):
for k, v in zip(key, value):
self[k] = v
else:
raise ValueError('keys and values to {}.__setitem__ must be (iterables of) (string, string or integer) tuples and string or integer')
  def __contains__(self, key):
    """Return whether a (feature, token-or-index) pair is known to the vocab."""
    assert isinstance(key, (tuple, list))
    vocab, key = key
    if isinstance(key, six.string_types):
      if not self.cased and key != self.PAD_STR:
        key = key.lower()
      return key in self._str2idx[vocab]
    elif isinstance(key, six.integer_types + (np.int32, np.int64)):
      return key in self._idx2str[vocab]
    else:
      # NOTE(review): the '{}' placeholder is never filled in (no .format call)
      raise ValueError('key to {}.__contains__ must be (string, string or integer) tuple')
  def __len__(self):
    # depth of the vocab: the number of features, not the number of tokens
    return len(self._feats)
def __iter__(self):
return (feat for feat in self._feats)
#=============================================================
  @property
  def drop_func(self):
    """Dropout function named by config option 'drop_func', resolved in the
    embeddings module; raises AttributeError for an unknown name."""
    drop_func = self._config.getstr(self, 'drop_func')
    if hasattr(embeddings, drop_func):
      return getattr(embeddings, drop_func)
    else:
      raise AttributeError("module '{}' has no attribute '{}'".format(embeddings.__name__, drop_func))
  @property
  def embed_size(self):
    """Config option 'embed_size' (int)."""
    return self._config.getint(self, 'embed_size')
  @property
  def embed_keep_prob(self):
    """Config option 'embed_keep_prob' (float)."""
    return self._config.getfloat(self, 'embed_keep_prob')
  @property
  def hidden_func(self):
    """Nonlinearity named by config option 'hidden_func', resolved in the
    nonlin module; raises AttributeError for an unknown name."""
    hidden_func = self._config.getstr(self, 'hidden_func')
    if hasattr(nonlin, hidden_func):
      return getattr(nonlin, hidden_func)
    else:
      raise AttributeError("module '{}' has no attribute '{}'".format(nonlin.__name__, hidden_func))
  @property
  def hidden_size(self):
    """Config option 'hidden_size' (int)."""
    return self._config.getint(self, 'hidden_size')
  @property
  def add_linear(self):
    """Config option 'add_linear' (bool)."""
    return self._config.getboolean(self, 'add_linear')
  @property
  def n_layers(self):
    """Config option 'n_layers' (int)."""
    return self._config.getint(self, 'n_layers')
  @property
  def hidden_keep_prob(self):
    """Config option 'hidden_keep_prob' (float)."""
    return self._config.getfloat(self, 'hidden_keep_prob')
  @property
  def max_embed_count(self):
    """Config option 'max_embed_count': cap on indexed tokens; 0 = no cap
    (see index_by_counts)."""
    return self._config.getint(self, 'max_embed_count')
  @property
  def min_occur_count(self):
    """Config option 'min_occur_count': minimum count for a token to be
    indexed; 0 = keep all (see index_by_counts)."""
    return self._config.getint(self, 'min_occur_count')
  @property
  def vocab_loadname(self):
    """Config option 'vocab_loadname': fallback path to load a vocab file
    from (see load)."""
    return self._config.getstr(self, 'vocab_loadname')
  @property
  def vocab_savename(self):
    """Path in save_dir where this vocab is dumped and reloaded."""
    return os.path.join(self.save_dir, self.field+'-'+self._save_str+'.lst')
  @property
  def keyed(self):
    """Config option 'keyed': True when tokens are 'Feat=Value' pairs,
    False when features are positional."""
    return self._config.getboolean(self, 'keyed')
  @property
  def separator(self):
    """String used to split a multitoken into parts; '' (the default when
    unset) means split per character (see _count)."""
    separator = self._config.getstr(self, 'separator')
    if separator is None:
      return ''
    else:
      return separator
  @property
  def pad_str(self):
    """Config option 'pad_str'; defaults to '' when unset."""
    pad_str = self._config.getstr(self, 'pad_str')
    if pad_str is None:
      pad_str = ''
    return pad_str
  @property
  def cased(self):
    """Config option 'cased': whether token matching is case sensitive."""
    return self._config.getboolean(self, 'cased')
  # TODO This was added at the eleventh hour and should be fixed
  @property
  def cliplen(self):
    """Maximum number of positional parts to index; 0 disables clipping.

    An instance-level override (_cliplen) wins over the config option.
    """
    if '_cliplen' in self.__dict__:
      return self._cliplen
    elif self._config.has_option(self.classname, 'cliplen'):
      return self._config.getint(self, 'cliplen')
    else:
      return 0
#***************************************************************
class LemmaFeatureVocab(FeatureVocab, cv.LemmaVocab):
  """FeatureVocab bound to the column provided by cv.LemmaVocab."""
  pass
class XPOSFeatureVocab(FeatureVocab, cv.XPOSVocab):
  """FeatureVocab bound to the column provided by cv.XPOSVocab."""
  pass
class UFeatsFeatureVocab(FeatureVocab, cv.UFeatsVocab):
  """FeatureVocab bound to the column provided by cv.UFeatsVocab."""
  pass
| 38.835505 | 176 | 0.564101 |
ace3e5d81ffe5f274fc37c01ee14f7df9082e39a | 8,684 | py | Python | logic_solver.py | danpolitte/puzzle-solver | b3475764b99ac188bcb2003217910baa68b547e5 | [
"MIT"
] | null | null | null | logic_solver.py | danpolitte/puzzle-solver | b3475764b99ac188bcb2003217910baa68b547e5 | [
"MIT"
] | null | null | null | logic_solver.py | danpolitte/puzzle-solver | b3475764b99ac188bcb2003217910baa68b547e5 | [
"MIT"
] | null | null | null | # logic_solver.py: a propositional logic solving system
class LogicSolver:
def __init__(self, verbose=False):
self._verbose = verbose
self._prop_eqns = []
self._knowledge = {}
def add_equation(self, proposition_list, eqn_type):
# Adds an equation: the propositions in proposition_list will be related by the relation _type_ (XOR, OR, etc)
self._prop_eqns.append(PropositionEqn(set(proposition_list), eqn_type))
def add_true_propositions(self, proposition_list):
# Marks all the propositions in the given iterable True in this solver
self.add_knowledge({i: True for i in proposition_list})
def add_false_propositions(self, proposition_list):
# Marks all the propositions in the given iterable False in this solver
self.add_knowledge({i: False for i in proposition_list})
def add_knowledge(self, proposition_to_bool_dict):
# Marks propositions True and False based on values in given dictionary
# TODO: check for contradiction with previously known things here
self._knowledge.update(proposition_to_bool_dict)
def run_iter(self):
# The magic: run a turn on this solver.
# Stop after first type of inference that allows new insights. This way, we can avoid the expensive later
# kinds whenever possible.
# Step 0: transform/reduce sets from previous info
print('Step 0 (apply previous discoveries)')
for datum, knownValue in self._knowledge.items():
for s in self._prop_eqns:
s.apply_information(datum, knownValue)
# Can we make any new inferences from this?
new_inferences = gather_all_inferences(self._prop_eqns)
# # Step 1: transformations based on comparing pairs of sets (subset-based reduction)
# if len(new_inferences) == 0: # Only if no discoveries so far
# print('Step 1 (subset-reduction)')
# new_inferences, self._prop_eqns = pair_reductions(self._prop_eqns)
#
# # Step 2: adding sets using combining inference rules (under certain conditions)
# if len(new_inferences) == 0: # Only if no discoveries so far
# print('Step 2 (triple-set combining)')
# new_inferences, self._prop_eqns = triplet_reductions(self._prop_eqns)
# Update things known
self.add_knowledge(new_inferences)
# Wrap-up: are there any depleted sets we must clean up?
self._prop_eqns = [s for s in self._prop_eqns if s.still_has_info()]
# Done with this iteration
def get_num_sets(self):
return len(self._prop_eqns)
def is_done(self):
# If this is True, no point to further iterations
return len(self._prop_eqns) == 0
def get_knowledge(self):
# Returns (pos_knowledge, neg_knowledge) tuple of all things we know so far
pos_knowledge = {k: v for k, v in self._knowledge.items() if v}
neg_knowledge = {k: v for k, v in self._knowledge.items() if not v}
return pos_knowledge, neg_knowledge
def gather_all_inferences(prop_eqns):
    """Collect the union of the inferences offered by every equation."""
    combined = {}
    for eqn in prop_eqns:
        # TODO: this would be a good time to check whether any pair of things added here contradict each other
        combined.update(eqn.get_inferences())
    return combined
"""
PropositionEqn: Represents a set of logical propositions. The propositions can be any objects which
can be inserted into a set() and the set may have one of the following initial types (with n propositions):
- XOR (# true propoositions in [1,1])
- NOR (# true propoositions in [0,0])
- AND (# true propoositions in [n,n])
- OR (# true propoositions in [1,n])
- NAND (# true propoositions in [0,n-1])
"""
class PropositionEqn:
    """One constraint over a set of propositions.

    _truecount tracks every still-possible number of true propositions in
    _set; applying knowledge shrinks both until the equation collapses to
    an AND (everything true) or NOR (everything false).
    """
    def __init__(self, input_list, settype='xor'):
        self._set = set(input_list)
        prop_count = len(self._set)
        if settype.lower() == 'xor':
            self._truecount = list(range(1, 1+1))
        elif settype.lower() == 'nor':
            self._truecount = list(range(0, 0+1))
        elif settype.lower() == 'and':
            self._truecount = list(range(prop_count, prop_count + 1))
        elif settype.lower() == 'or':
            self._truecount = list(range(1, prop_count + 1))
        elif settype.lower() == 'nand':
            self._truecount = list(range(0, (prop_count - 1) + 1))
        else:
            # NOTE(review): an invalid type leaves _truecount unset instead of raising
            print("Invalid PropositionEqn type", settype, "! This will probably fail soon.")
    def __str__(self):
        # report the most specific basic type that currently applies, if any
        if self.is_and():
            dominant_type = 'AND'
        elif self.is_nor():
            dominant_type = 'NOR'
        elif self.is_xor():
            dominant_type = 'XOR'
        elif self.is_or():
            dominant_type = 'OR'
        elif self.is_nand():
            dominant_type = 'NAND'
        else:
            dominant_type = None
        numtrue_string = dominant_type if dominant_type \
            else 'numtrue={}'.format(','.join(str(item) for item in self._truecount))
        return 'PropositionEqn(count={},{}: {})'\
            .format(len(self._set), numtrue_string, str(self._set))
    def __repr__(self):
        return str(self)
    # Functions to determine what basic types apply to this eqn. (Multiple might apply)
    def is_xor(self):
        return len(self._truecount) == 1 and self._truecount[0] == 1
    def is_nor(self):
        return len(self._truecount) == 1 and self._truecount[0] == 0
    def is_and(self):
        prop_count = len(self._set)
        return len(self._truecount) == 1 and self._truecount[0] == prop_count
    def is_or(self):
        prop_count = len(self._set)
        our_counts = set(self._truecount)
        true_or_counts = set(range(1, prop_count+1))
        return our_counts == true_or_counts
    def is_nand(self):
        prop_count = len(self._set)
        our_counts = set(self._truecount)
        true_nand_counts = set(range(0, (prop_count-1)+1))
        return our_counts == true_nand_counts
    """
    apply_information: transforms this equation's contents to conform to the new information that a
    certain proposition has a given truth value
    """
    def apply_information(self, proposition, truth_value):
        if proposition in self._set:
            self._set.remove(proposition)
            # Update possible numbers of true propositions left
            if truth_value: # proposition was true
                # Subtract one from everything
                self._truecount = [i-1 for i in self._truecount]
            # If the proposition was false, counts remain the same
            # Restrict to range [0, n], the only values that make sense
            prop_count = len(self._set)
            self._truecount = list(filter(lambda c: 0 <= c <= prop_count, self._truecount))
            # Check validity of possible numbers of true propositions left
            if len(self._truecount) == 0:
                print('apply_information: contradiction reached!')
                # TODO: account for this gracefully
    """
    get_inferences: acquire a dictionary of the form {prop: truth_value, prop2: truth_value2 ... } describing any
    inferences that can be made from this set. This action removes the items used to make these inferences from the
    set.
    """
    def get_inferences(self):
        inferences = {}
        # If the set has collapsed to either an AND set or a NOR set, we can infer the values of everything left
        if self.is_and():
            for prop in self._set:
                inferences[prop] = True
            self._set.clear()
        elif self.is_nor():
            # Everything is False, obviously
            for prop in self._set:
                inferences[prop] = False
            self._set.clear()
        return inferences
    """
    still_has_info: provides a bool describing whether this set is no longer of use and may be terminated without
    loss of information.
    """
    def still_has_info(self):
        return len(self._set) > 0
    # Exposing this class's members to make external work easier
    def set(self):
        return self._set
def main():
    """Small smoke test: two XOR pairs linked by an OR constraint."""
    solver = LogicSolver()
    for props, eqn_type in ((['p0', 'p1'], 'xor'),
                            (['q0', 'q1'], 'xor'),
                            (['p0', 'q1'], 'or')):
        solver.add_equation(props, eqn_type)
    solver.add_true_propositions(['p1'])
    for iteration in range(7):
        print()
        print(iteration)
        print(solver._knowledge)
        print(solver._prop_eqns)
        solver.run_iter()
        if solver.is_done():
            print('Done!')
            break
if __name__ == "__main__":
    main()
| 35.444898 | 119 | 0.629088 |
ace3e609e64b90170f7144586189f155d3dd2220 | 8,887 | py | Python | BERT/model/bert.py | awesome-archive/LARK | 56109a622e3c8bc00c6613ec49e41859fa644fee | [
"Apache-2.0"
] | 1 | 2019-03-16T08:24:19.000Z | 2019-03-16T08:24:19.000Z | BERT/model/bert.py | wqw123/LARK | 6204b17eeb21e56ae8c750e1a9678dfc84477e3c | [
"Apache-2.0"
] | null | null | null | BERT/model/bert.py | wqw123/LARK | 6204b17eeb21e56ae8c750e1a9678dfc84477e3c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import numpy as np
import paddle.fluid as fluid
from model.transformer_encoder import encoder, pre_process_layer
class BertConfig(object):
    """Loads and exposes a BERT model configuration stored as a JSON file."""

    def __init__(self, config_path):
        """Parse the JSON file at *config_path* into an internal dict."""
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Return the parsed JSON dict.

        Raises IOError naming the offending path on any read/parse failure.
        Narrowed from a bare `except Exception` so programming errors are
        not masked; JSONDecodeError is a ValueError subclass.
        """
        try:
            with open(config_path) as json_file:
                config_dict = json.load(json_file)
        except (IOError, OSError, ValueError):
            raise IOError("Error in parsing bert model config file '%s'" %
                          config_path)
        else:
            return config_dict

    def __getitem__(self, key):
        """Dict-style access to a single config entry."""
        return self._config_dict[key]

    def print_config(self):
        """Print all entries sorted by key, followed by a separator line."""
        for arg, value in sorted(self._config_dict.items()):
            print('%s: %s' % (arg, value))
        print('------------------------------------------------')
class BertModel(object):
    """Builds the BERT graph (embeddings + transformer encoder) in Paddle fluid.

    The constructor reads hyper-parameters from a BertConfig-like mapping and
    immediately assembles the network over the given input tensors.
    """
    def __init__(self,
                 src_ids,
                 position_ids,
                 sentence_ids,
                 self_attn_mask,
                 config,
                 weight_sharing=True,
                 use_fp16=False):

        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        self._sent_types = config['type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        self._weight_sharing = weight_sharing

        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._dtype = "float16" if use_fp16 else "float32"

        # Initialize all weights by truncated normal initializer, and all biases
        # will be initialized by constant zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])

        self._build_model(src_ids, position_ids, sentence_ids, self_attn_mask)

    def _build_model(self, src_ids, position_ids, sentence_ids, self_attn_mask):
        """Sum word/position/sentence embeddings and run the transformer encoder."""
        # padding id in vocabulary must be set to 0
        emb_out = fluid.layers.embedding(
            input=src_ids,
            size=[self._voc_size, self._emb_size],
            dtype=self._dtype,
            param_attr=fluid.ParamAttr(
                name=self._word_emb_name, initializer=self._param_initializer),
            is_sparse=False)
        position_emb_out = fluid.layers.embedding(
            input=position_ids,
            size=[self._max_position_seq_len, self._emb_size],
            dtype=self._dtype,
            param_attr=fluid.ParamAttr(
                name=self._pos_emb_name, initializer=self._param_initializer))

        sent_emb_out = fluid.layers.embedding(
            sentence_ids,
            size=[self._sent_types, self._emb_size],
            dtype=self._dtype,
            param_attr=fluid.ParamAttr(
                name=self._sent_emb_name, initializer=self._param_initializer))

        emb_out = emb_out + position_emb_out
        emb_out = emb_out + sent_emb_out

        # 'nd': normalize then dropout before entering the encoder stack
        emb_out = pre_process_layer(
            emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')

        # FIX: was `self._dtype is "float16"` — identity comparison with a
        # string literal; use equality instead
        if self._dtype == "float16":
            self_attn_mask = fluid.layers.cast(
                x=self_attn_mask, dtype=self._dtype)

        # replicate the attention mask once per head
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True

        self._enc_out = encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            hidden_act=self._hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            name='encoder')

    def get_sequence_output(self):
        """Return the encoder output for every token position."""
        return self._enc_out

    def get_pooled_output(self, next_sent_index):
        """Get the first feature of each sequence for classification"""
        self._reshaped_emb_out = fluid.layers.reshape(
            x=self._enc_out, shape=[-1, self._emb_size], inplace=True)
        next_sent_index = fluid.layers.cast(x=next_sent_index, dtype='int32')
        next_sent_feat = fluid.layers.gather(
            input=self._reshaped_emb_out, index=next_sent_index)
        next_sent_feat = fluid.layers.fc(
            input=next_sent_feat,
            size=self._emb_size,
            act="tanh",
            param_attr=fluid.ParamAttr(
                name="pooled_fc.w_0", initializer=self._param_initializer),
            bias_attr="pooled_fc.b_0")
        return next_sent_feat

    def get_pretraining_output(self, mask_label, mask_pos, labels,
                               next_sent_index):
        """Get the loss & accuracy for pretraining"""
        mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')

        # extract the first token feature in each sentence
        next_sent_feat = self.get_pooled_output(next_sent_index)
        # extract masked tokens' feature
        mask_feat = fluid.layers.gather(
            input=self._reshaped_emb_out, index=mask_pos)

        # transform: fc
        mask_trans_feat = fluid.layers.fc(
            input=mask_feat,
            size=self._emb_size,
            act=self._hidden_act,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_fc.w_0',
                initializer=self._param_initializer),
            bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
        # transform: layer norm
        mask_trans_feat = pre_process_layer(
            mask_trans_feat, 'n', name='mask_lm_trans')

        mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))
        if self._weight_sharing:
            # tie the output projection to the input word embedding matrix
            fc_out = fluid.layers.matmul(
                x=mask_trans_feat,
                y=fluid.default_main_program().global_block().var(
                    self._word_emb_name),
                transpose_y=True)
            fc_out += fluid.layers.create_parameter(
                shape=[self._voc_size],
                dtype=self._dtype,
                attr=mask_lm_out_bias_attr,
                is_bias=True)
        else:
            fc_out = fluid.layers.fc(input=mask_trans_feat,
                                     size=self._voc_size,
                                     param_attr=fluid.ParamAttr(
                                         name="mask_lm_out_fc.w_0",
                                         initializer=self._param_initializer),
                                     bias_attr=mask_lm_out_bias_attr)

        mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
            logits=fc_out, label=mask_label)
        mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)

        next_sent_fc_out = fluid.layers.fc(
            input=next_sent_feat,
            size=2,
            param_attr=fluid.ParamAttr(
                name="next_sent_fc.w_0", initializer=self._param_initializer),
            bias_attr="next_sent_fc.b_0")

        next_sent_loss, next_sent_softmax = fluid.layers.softmax_with_cross_entropy(
            logits=next_sent_fc_out, label=labels, return_softmax=True)

        next_sent_acc = fluid.layers.accuracy(
            input=next_sent_softmax, label=labels)

        mean_next_sent_loss = fluid.layers.mean(next_sent_loss)

        loss = mean_next_sent_loss + mean_mask_lm_loss
        return next_sent_acc, mean_mask_lm_loss, loss
| 39.323009 | 84 | 0.626421 |
ace3e646276427d63961c0fac92822792068b117 | 36,078 | py | Python | lib/oci_utils/vnicutils.py | guidotijskens/oci-utils | 77c75e940f3a9875c97b76f5d740cc65b664efb5 | [
"UPL-1.0"
] | 35 | 2019-04-21T00:58:30.000Z | 2022-01-28T15:22:41.000Z | lib/oci_utils/vnicutils.py | guidotijskens/oci-utils | 77c75e940f3a9875c97b76f5d740cc65b664efb5 | [
"UPL-1.0"
] | 43 | 2019-05-19T20:13:41.000Z | 2022-03-31T17:39:25.000Z | lib/oci_utils/vnicutils.py | guidotijskens/oci-utils | 77c75e940f3a9875c97b76f5d740cc65b664efb5 | [
"UPL-1.0"
] | 23 | 2019-04-10T12:48:00.000Z | 2022-03-25T16:57:47.000Z | # oci-utils
#
# Copyright (c) 2018, 2021 Oracle and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl.
import logging
import os
import os.path
from .oci_api import OCISession
from . import cache
from .metadata import InstanceMetadata
from .impl import network_helpers as NetworkHelpers
from .impl.network_interface import NetworkInterfaceSetupHelper, _intf_dict
from .impl import sudo_utils
_logger = logging.getLogger('oci-utils.vnicutils')
class VNICUtils:
    """Class for managing VNICs.

    Keeps a persistent cache (the vnic_info file) of VNIC configuration
    state: excluded items, manually deconfigured addresses and secondary
    private IPs.
    """
    # file with saved vnic information
    __vnic_info_file = "/var/lib/oci-utils/vnic_info"
    # OBSOLETE: file with VNICs and stuff to exclude from automatic
    # configuration. only kept for migration
    __net_exclude_file = "/var/lib/oci-utils/net_exclude"
    def __init__(self):
        """ Class VNICUtils initialisation.

        Loads (or creates) the vnic_info cache and fetches instance metadata
        best-effort: self._metadata stays None when the metadata service is
        unreachable.
        """
        self.vnic_info = self.get_vnic_info()
        self._metadata = None
        try:
            self._metadata = InstanceMetadata().refresh()
        except IOError as e:
            _logger.warning('Cannot get metadata: %s', str(e))
    @staticmethod
    def __new_vnic_info():
        """
        Create a new vnic info dict, migrating oci-utils 0.5's
        net_exclude file into it (and deleting that file) when present.

        Returns
        -------
        dict
            The new vnic info dict.
        """
        _vnic_info = {
            'exclude': [],
            'deconfig': [],
            'sec_priv_ip': []}

        # migration from oci-utils 0.5's net_exclude file
        excludes = cache.load_cache(VNICUtils.__net_exclude_file)[1]
        if excludes is not None:
            _vnic_info['exclude'] = excludes
            cache.write_cache(cache_content=_vnic_info,
                              cache_fname=VNICUtils.__vnic_info_file)
            try:
                os.remove(VNICUtils.__net_exclude_file)
            except Exception as e:
                # best effort: the obsolete file simply stays behind
                _logger.debug('Cannot remove file [%s]: %s', VNICUtils.__net_exclude_file, str(e))
            _logger.debug('Excluded intf: %s ', excludes)
        return _vnic_info
    def get_excluded_interfaces(self):
        """
        Gets excluded interface from auto configuration/deconfiguration.

        Returns
        -------
        list
            Items (interface names, VNIC OCIDs or IP addresses) excluded
            from automatic (de)configuration.
        """
        return self.vnic_info['exclude']
    def get_vnic_info(self):
        """
        Load the vnic_info file. If the file is missing, a new one is created.

        Returns
        -------
        dict
            The vnic info dict (the file timestamp is kept in
            self.vnic_info_ts as a side effect).
        """
        self.vnic_info_ts, self.vnic_info = cache.load_cache(VNICUtils.__vnic_info_file)
        if self.vnic_info is None:
            # GT
            self.vnic_info = {'exclude': [],
                              'deconfig': [],
                              'sec_priv_ip': []}
            # self.vnic_info = {'exclude': []}
            # GT
        # for compatibility: older cache files may lack these keys
        if 'deconfig' not in self.vnic_info:
            self.vnic_info['deconfig'] = []
        if 'sec_priv_ip' not in self.vnic_info:
            self.vnic_info['sec_priv_ip'] = []
        return self.vnic_info
    def save_vnic_info(self):
        """
        Save self.vnic_info in the vnic_info file.

        Returns
        -------
        int
            The result of cache.write_cache (presumably the new timestamp
            on success, None on failure — see the cache module).
        """
        _logger.debug("Saving vnic_info.")
        # _ = cache.write_cache(cache_content=self.vnic_info, cache_fname=VNICUtils.__vnic_info_file)
        return cache.write_cache(cache_content=self.vnic_info, cache_fname=VNICUtils.__vnic_info_file)
    def set_namespace(self, ns):
        """
        Set the 'ns' field of the vnic_info dict to the given value. This
        value is passed to the secondary vnic script with the -n option and
        is used to place the interface in the given namespace. The default
        is no namespace.

        Parameters
        ----------
        ns: str
            The namespace value.
        """
        # consumed by auto_config() when it builds per-interface namespace info;
        # not persisted until save_vnic_info() is called
        _logger.debug('Setting namespace %s', ns)
        self.vnic_info['ns'] = ns
    def set_sshd(self, val):
        """
        Set the 'sshd' field of the vnic_info dict to the given value.

        Parameters
        ----------
        val: bool
            When set to True, the secondary vnic script is called with
            the -r option, which, if a namespace is also specified,
            runs sshd in the namespace. The default is False.
        """
        # NOTE(review): auto_config() checks `'sshd' in self.vnic_info`, i.e. the
        # presence of the key, not its value — setting False here still enables it
        _logger.debug('Set sshd to %s', val)
        self.vnic_info['sshd'] = val
def add_private_ip(self, ipaddr, vnic_id):
"""
Add the given secondary private IP to vnic_info.
Save vnic info to the vnic_info file.
Parameters
----------
ipaddr: str
The secondary IP address.
vnic_id: int
The vNIC id.
"""
_logger.debug('Adding ip %s to %s.', ipaddr, vnic_id )
_interfaces = self.get_network_config()
_intf = None
for _interface in _interfaces:
if _interface.get('VNIC') == vnic_id:
_intf = _interface
break
if _intf is None:
# cannot happen
_logger.debug('WARNING : cannot find vnic with id [%s]: caller did not check ?')
if 'MISSING_SECONDARY_IPS' not in _intf:
_intf['MISSING_SECONDARY_IPS'] = [ipaddr]
else:
if ipaddr not in _intf['MISSING_SECONDARY_IPS']:
_intf['MISSING_SECONDARY_IPS'].append(ipaddr)
if [ipaddr, vnic_id] not in self.vnic_info['sec_priv_ip']:
self.vnic_info['sec_priv_ip'].append([ipaddr, vnic_id])
self.save_vnic_info()
self._config_secondary_intf(_intf)
    def del_private_ip(self, ipaddr, vnic_id):
        """
        Delete a secondary private IP from the system and from vnic_info,
        then save vnic_info to the vnic_info file.

        Parameters
        ----------
        ipaddr: str
            The IP addr to be removed.
        vnic_id: str
            The VNIC OCID the address belongs to.

        Returns
        -------
        tuple
            (exit code: int, output message).
        """
        _interfaces = self.get_network_config()
        _interface_to_delete = None
        for _interface in _interfaces:
            if _interface.get('VNIC') == vnic_id \
                    and (_interface.get('ADDR') == ipaddr
                         or ipaddr in _interface.get('SECONDARY_ADDRS', ())):
                _interface_to_delete = _interface
                break

        if not _interface_to_delete:
            _logger.debug('del_private_ip. IP [%s] not found on vNIC %s', ipaddr, vnic_id)
            return 0, 'IP %s is not configured.' % ipaddr

        # 1. delete any rule for this ip
        # NetworkHelpers.remove_ip_addr_rules(ipaddr)
        NetworkHelpers.remove_ip_rules(ipaddr)
        # 2. remove addr from the system
        NetworkInterfaceSetupHelper(_interface_to_delete).remove_secondary_address(ipaddr)
        # 3. removes the mac address from the unmanaged-devices list in then NetworkManager.conf file.
        NetworkHelpers.add_mac_to_nm(_interface_to_delete['MAC'])
        # 4. update cache: drop the address and un-exclude it
        if [ipaddr, vnic_id] in self.vnic_info['sec_priv_ip']:
            self.vnic_info['sec_priv_ip'].remove([ipaddr, vnic_id])
        self.include(ipaddr)
        self.save_vnic_info()
        return 0, ''
def _is_intf_excluded(self, interface):
"""
Checks if this interface is excluded
Checks if interface name, VNIC ocid or ip addr is part of excluded items
"""
for excl in self.vnic_info.get('exclude', ()):
if excl in (interface['IFACE'], interface['VNIC'], interface['ADDR']):
return True
return False
    def exclude(self, item):
        """
        Add item to the "exclude" list: IP addresses or interfaces that are
        excluded from automatic configuration.  (The previous docstring said
        "Remove"; this method adds.)

        Parameters
        ----------
        item: str
            Item (IP or interface) to be excluded.
        """
        if item not in self.vnic_info['exclude']:
            _logger.debug('Adding %s to "exclude" list', item)
            self.vnic_info['exclude'].append(item)
            _ = self.save_vnic_info()
    def include(self, item):
        """
        Remove item from the "exclude" list, i.e. make it eligible again for
        automatic configuration.  (The previous docstring said "Add"; this
        method removes.)

        Parameters
        ----------
        item: str
            Item (IP or interface) to stop excluding.
        """
        if item in self.vnic_info['exclude']:
            _logger.debug('Removing %s from "exclude" list', item)
            self.vnic_info['exclude'].remove(item)
            _ = self.save_vnic_info()
    def auto_config(self, sec_ip, deconfigured=True):
        """
        Auto configure VNICs: compare the live network configuration with
        metadata, then configure missing interfaces, deconfigure stale
        ones and add requested secondary IPs.

        Parameters
        ----------
        sec_ip: list of tuple (<ip adress>,<vnic ocid>)
            secondary IPs to add to vnics. can be None or empty
        deconfigured: bool
            if True, does configure manually unconfigured interfaces.

        Returns
        -------
        None
            Failures on individual interfaces are logged, not raised.
        """
        _all_intf = self.get_network_config()
        _logger.debug('Auto config %s, by oci-network-config: %s', sec_ip, deconfigured)
        # we may need a mapping of intf by physical NIC index
        # for BMs secondary VNIC are not plumbed
        # {<index>: <intf name>}
        _by_nic_index = {}
        # the interfaces to be configured according to metadata
        _all_to_be_configured = []
        # the interfaces on which a secondary interface must be added
        _all_to_be_modified = []
        # the interfaces to be unconfigured according to metadata
        _all_to_be_deconfigured = []
        # 1.1 compose list of interface which need configuration
        # 1.2 compose list of interface which need deconfiguration
        for _intf in _all_intf:
            if _intf['IFACE'] != '-':
                # keep track of interface by NIC index
                _by_nic_index[_intf['NIC_I']] = _intf['IFACE']
            # Is this intf excluded ?
            if self._is_intf_excluded(_intf):
                continue
            # add secondary IPs if any
            if sec_ip:
                for (ip, vnic) in sec_ip:
                    if vnic == _intf['VNIC']:
                        if 'MISSING_SECONDARY_IPS' not in _intf:
                            _intf['MISSING_SECONDARY_IPS'] = [ip]
                        else:
                            if ip not in _intf['MISSING_SECONDARY_IPS']:
                                _intf['MISSING_SECONDARY_IPS'].append(ip)
            # GT
            _logger.debug('Auto config interface %s %s', _intf['ADDR'], _intf['CONFSTATE'])
            if _intf['CONFSTATE'] == 'ADD':
                if deconfigured:
                    _logger.debug('Auto config configure called via oci-network-config')
                # skip interfaces the user deconfigured manually unless forced
                if deconfigured or not _intf['ADDR'] in self.vnic_info['deconfig']:
                    _all_to_be_configured.append(_intf)
                    # take care of secondary addresses.
                    # at this point we cannot rely on MISSING_SECONDARY_IPS as we are configured "new" interface
                    # in order to use the same code path, set MISSING_SECONDARY_IPS here so _all_to_be_modified set
                    # will also contain this one. Need better refactoring: enough for now.
                    if len(_intf.get('SECONDARY_ADDRS', ())) > 0:
                        _intf['MISSING_SECONDARY_IPS'] = _intf['SECONDARY_ADDRS']
            if _intf['CONFSTATE'] == 'DELETE':
                _all_to_be_deconfigured.append(_intf)
            # GT
            # if 'MISSING_SECONDARY_IPS' in _intf:
            if deconfigured and 'MISSING_SECONDARY_IPS' in _intf:
                _all_to_be_modified.append(_intf)

        if _logger.isEnabledFor(logging.DEBUG):
            _logger.debug("interfaces to be configured: %d", len(_all_to_be_configured))
            for _in in _all_to_be_configured:
                _logger.debug("CONFIGURE %s", _in)
            _logger.debug("interfaces to be unconfigured: %d", len(_all_to_be_deconfigured))
            for _in in _all_to_be_deconfigured:
                _logger.debug("DECONFIGURE %s", _in)
            _logger.debug("interfaces to be modified: %d", len(_all_to_be_modified))
            for _in in _all_to_be_modified:
                _logger.debug("MODIFY %s", _in)

        # 2 configure the ones which need it
        for _intf in _all_to_be_configured:
            ns_i = None
            if 'ns' in self.vnic_info:
                # if requested to use namespace, compute namespace name pattern
                ns_i = {}
                if self.vnic_info['ns']:
                    ns_i['name'] = self.vnic_info['ns']
                else:
                    ns_i['name'] = 'ons%s' % _intf['IFACE']

                ns_i['start_sshd'] = 'sshd' in self.vnic_info
            try:
                # for BMs, IFACE can be empty ('-'), we local physical NIC
                # thank to NIC index
                # make a copy of it to change the IFACE
                _intf_to_use = _intf_dict(_intf)
                if self._metadata is None:
                    raise ValueError('no metadata information')
                if self._metadata['instance']['shape'].startswith('BM') and _intf['IFACE'] == '-':
                    _intf_to_use['IFACE'] = _by_nic_index[_intf['NIC_I']]
                    _intf_to_use['STATE'] = "up"

                if _logger.isEnabledFor(logging.DEBUG):
                    _logger.debug("begin configuration of %s", _intf_to_use)

                _auto_config_intf(ns_i, _intf_to_use)

                # disable network manager for that device
                NetworkHelpers.remove_mac_from_nm(_intf['MAC'])

                # setup routes
                self._auto_config_intf_routing(ns_i, _intf_to_use)
                #
                # GT
                self.config(_intf['ADDR'])
            except Exception as e:
                # best effort , just issue warning
                _logger.warning('Cannot configure %s: %s', _intf_to_use, str(e))

        # 3 deconfigure the one which need it
        for _intf in _all_to_be_deconfigured:
            try:
                self._auto_deconfig_intf_routing(_intf)
                _auto_deconfig_intf(_intf)
            except Exception as e:
                # best effort , just issue warning
                _logger.warning('Cannot deconfigure %s: %s', _intf, str(e))

        # 4 add secondaries IP address
        for _intf in _all_to_be_modified:
            if self._metadata['instance']['shape'].startswith('BM') and _intf['IFACE'] == '-':
                # it may happen if we came after configuring the interface by injecting MISSING_SECONDARY_IPS
                _intf['IFACE'] = _by_nic_index[_intf['NIC_I']]
                _intf['STATE'] = "up"
            self._config_secondary_intf(_intf)
    def _deconfig_secondary_addr(self, intf_infos, address):
        """
        Removes a secondary IP address from an interface.

        Parameters:
        -----------
        intf_infos: dict
            Interface info as returned by get_network_config()
        address: str
            IP address to be removed
        Returns:
        --------
        None
        Raise:
        ------
        Exception in case of failure
        """
        # drop the source-based routing rules tied to this address first,
        # then remove the address itself from the device
        _logger.debug("Removing IP addr rules")
        # NetworkHelpers.remove_ip_addr_rules(address)
        NetworkHelpers.remove_ip_rules(address)
        _logger.debug("Removing IP addr [%s] from [%s]", address, intf_infos)
        NetworkInterfaceSetupHelper(intf_infos).remove_secondary_address(address)
    def auto_deconfig(self, sec_ip):
        """
        De-configure VNICs. Run the secondary vnic script in automatic
        de-configuration mode (-d).

        Parameters
        ----------
        sec_ip: list of tuple (<ip address>,<vnic ocid>)
            Secondary IPs to remove from vnics. Can be None or empty;
            when empty, all non-primary, non-excluded interfaces are
            deconfigured instead.

        Returns
        -------
        tuple
            (exit code: int, output from the "sec vnic" script execution.)
        """
        _all_intf = self.get_network_config()
        _logger.debug('Deconfigure all interfaces %s', _all_intf)
        # if we have secondary addrs specified, just take care of these
        # vnic OCID give us the mac address then select the right interface which has the ip
        if sec_ip:
            _translated = []
            if self._metadata is None:
                return 1, 'No metadata available'
            _all_vnic_md = self._metadata['vnics']
            # 1. locate the MAC: translate (ip, vnic ocid) pairs to (ip, mac) pairs
            for (ip, vnic) in sec_ip:
                _found = False
                for md_vnic in _all_vnic_md:
                    if md_vnic['vnicId'] == vnic:
                        _found = True
                        _logger.debug('Located vnic, mac is %s', md_vnic['macAddr'])
                        _translated.append((ip, md_vnic['macAddr']))
                        break
                if not _found:
                    # best effort: unknown VNICs are skipped with a warning
                    _logger.warning('VNIC not found : %s ', vnic)
            # 2. find the interface carrying each (ip, mac) and remove the address
            for (ip, mac) in _translated:
                # fetch the right interface
                _found = False
                for intf in _all_intf:
                    if intf['MAC'] == mac:
                        if 'SECONDARY_ADDRS' in intf and ip in intf['SECONDARY_ADDRS']:
                            _found = True
                            self._deconfig_secondary_addr(intf, ip)
                            break
                if not _found:
                    _logger.warning('IP %s not found', ip)
        else:
            # no specific IPs requested: unconfigure everything eligible
            for intf in _all_intf:
                # never touch the primary interface
                if intf.has('IS_PRIMARY'):
                    continue
                # CONFSTATE 'ADD' means nothing is configured yet, so
                # there is nothing to remove for this interface
                if intf['CONFSTATE'] == 'ADD':
                    continue
                # skip interfaces explicitly excluded by the user (-X)
                if self._is_intf_excluded(intf):
                    continue
                # remove secondary addresses before tearing down the interface
                for secondary_addr in intf.get('SECONDARY_ADDRS', ()):
                    self._deconfig_secondary_addr(intf, secondary_addr)
                self._auto_deconfig_intf_routing(intf)
                _auto_deconfig_intf(intf)
                # remember this address in the persisted 'deconfig' list
                # (see unconfig()) so it stays deconfigured
                self.unconfig(intf['ADDR'])
        return 0, ''
def unconfig(self, item):
"""
Add item to the deconfig list.
Parameters
----------
item: str
Item (IP or interface) to be unconfigured.
"""
if item not in self.vnic_info['deconfig']:
_logger.debug('Adding %s to "deconfig" list', item)
self.vnic_info['deconfig'].append(item)
_ = self.save_vnic_info()
def config(self, item):
"""
Remove item to the "deconfig" list.
Parameters
----------
item: str
Item (IP or interface) to be (re)configured.
"""
if item in self.vnic_info['deconfig']:
_logger.debug('Removing %s from "deconfig" list', item)
self.vnic_info['deconfig'].remove(item)
_ = self.save_vnic_info()
def _get_priv_addrs(self):
"""
Gets all vnic private addrs
Returns:
--------
dict : a vnic ocid indexed dict of list of IPs
"""
res = {}
oci_sess = None
my_instance = None
try:
oci_sess = OCISession()
my_instance = oci_sess.this_instance()
except Exception as e:
_logger.debug('Cannot get OCI session: %s', str(e), stack_info=True)
if bool(my_instance):
p_ips = my_instance.all_private_ips()
for p_ip in p_ips:
_ocid = p_ip.get_vnic_ocid()
_addr = p_ip.get_address()
if _ocid not in res:
res[_ocid] = []
res[_ocid].append(_addr)
return res
    def get_network_config(self):
        """
        Get network configuration.
        Fetch information from this instance metadata and aggregate
        it with system information. Information from metadata takes precedence.

        Returns
        -------
        list of dict
           keys are
            CONFSTATE  'uncfg' indicates missing IP config, 'missing' missing VNIC,
                        'excl' excluded (-X), '-' existing configuration matches the OCI VCN configuration
            ADDR       IP address
            SPREFIX    subnet CIDR prefix
            SBITS      subnet mask bits
            VIRTRT     virtual router IP address
            NS         namespace (if any)
            IND        interface index (if BM)
            IFACE      interface (underlying physical if VLAN is also set)
            VLTAG      VLAN tag (if BM)
            VLAN       IP virtual LAN (if any)
            STATE      state of interface
            MAC        MAC address
            NIC_I      (physical) NIC index
            VNIC       VNIC object identifier
            IS_PRIMARY is this interface the primary one ? (can be missing)
            SECONDARY_ADDRS secondary addresses
        """
        interfaces = []
        _all_intfs = NetworkHelpers.get_network_namespace_infos()
        # _logger.debug('All interfaces: %s', _all_intfs)
        # for BM cases (using macvlan/vlan) when using namespace , some interfaces (the macvlan ones within namespace)
        # do not have the 'link' property but the 'link_idx'
        # First build a "link by id" map
        # Note: loopback appears with index '1' in all namespaces.
        _link_by_idx = {}
        for _namespace, _nintfs in _all_intfs.items():
            for _i in _nintfs:
                _link_by_idx[_i['index']] = _i['device']
        # --- pass 1: collect the ethernet interfaces as seen on the system ---
        _all_from_system = []
        for _namespace, _nintfs in _all_intfs.items():
            for _i in _nintfs:
                # skip carrier-less and loopback devices
                if "NO-CARRIER" in _i['flags'] or "LOOPBACK" in _i['flags']:
                    continue
                if _i['type'] != 'ether':
                    continue
                _intf = _intf_dict()
                if _i.get('mac'):
                    _intf['MAC'] = _i.get('mac')
                _intf['IFACE'] = _i['device']
                if 'link' in _i and _i['link'] is not None:
                    _intf['LINK'] = _i['link']
                else:
                    # in that case, try with index if we have it
                    if _i['link_idx']:
                        _intf['LINK'] = _link_by_idx[_i['link_idx']]
                if 'subtype' in _i:
                    _intf['LINKTYPE'] = _i['subtype']
                else:
                    _intf['LINKTYPE'] = 'ether'
                _intf['IND'] = _i['index']
                _intf['STATE'] = _i['opstate']
                # default namespace is empty string
                if _namespace and _namespace != '':
                    _intf['NS'] = _namespace
                if _i.get('vlanid'):
                    _intf['VLAN'] = _i.get('vlanid')
                if len(_i.get('addresses', [])) > 0:
                    # at least one address configured: record primary + secondaries
                    _intf['CONFSTATE'] = '-'
                    _intf['ADDR'] = _i.get('addresses')[0]['address']
                    if len(_i.get('addresses', [])) > 1:
                        # first one in the list is the primary address of that vnic
                        _intf['SECONDARY_ADDRS'] = [ip['address'] for ip in _i.get('addresses')[1:]]
                else:
                    if not _i.get('is_vf'):
                        # by default, before correlation, set it to DELETE
                        _intf['CONFSTATE'] = 'DELETE'
                _all_from_system.append(_intf)
        # --- pass 2: collect the VNICs as declared in the instance metadata ---
        _all_from_metadata = []
        _first_loop = True
        if self._metadata is None:
            _logger.warning('no metadata available')
        else:
            _ip_per_id = self._get_priv_addrs()
            for md_vnic in self._metadata['vnics']:
                _intf = _intf_dict()
                if _first_loop:
                    # primary always come first
                    _intf['IS_PRIMARY'] = True
                    _first_loop = False
                _intf['MAC'] = md_vnic['macAddr'].upper()
                _intf['ADDR'] = md_vnic['privateIp']
                _intf['SPREFIX'] = md_vnic['subnetCidrBlock'].split('/')[0]
                _intf['SBITS'] = md_vnic['subnetCidrBlock'].split('/')[1]
                _intf['VIRTRT'] = md_vnic['virtualRouterIp']
                _intf['VLTAG'] = md_vnic['vlanTag']
                _intf['VNIC'] = md_vnic['vnicId']
                if 'nicIndex' in md_vnic:
                    # VMs do not have such attr
                    _intf['NIC_I'] = md_vnic['nicIndex']
                if md_vnic['vnicId'] in _ip_per_id:
                    # get all but the primary one
                    _intf['SECONDARY_ADDRS'] = \
                        [_ip for _ip in _ip_per_id[md_vnic['vnicId']] if _ip != md_vnic['privateIp']]
                _all_from_metadata.append(_intf)
        # --- pass 3: correlate metadata entries with system entries by MAC ---
        # now we correlate information
        # precedence is given to metadata
        for interface in _all_from_metadata:
            try:
                # locate the one with same ethernet address
                _candidates = [_i for _i in _all_from_system if _i['MAC'] == interface['MAC']]
                _state = 'ADD'
                _have_to_be_added = set()
                if len(_candidates) == 1:
                    # only one found , no ambiguity
                    # treat secondary addrs: if have some in metadata not present on system , we have to plumb them
                    _have_to_be_added = set(interface.get('SECONDARY_ADDRS', [])).difference(_candidates[0].get('SECONDARY_ADDRS', []))
                    interface.update(_candidates[0])
                    if _candidates[0].has('ADDR'):
                        # an addr on the correlated system intf -> state is '-'
                        _state = '-'
                elif len(_candidates) >= 2:
                    # we do not expect to have more than 2 anyway
                    # surely macvlan/vlans involved (BM case)
                    # the macvlan interface give us the addr and the actual link
                    # the vlan interface give us the vlan name
                    _macvlan_is = [_i for _i in _candidates if _i['LINKTYPE'] in ('macvlan', 'macvtap')]
                    _vlan_is = [_i for _i in _candidates if _i['LINKTYPE'] == 'vlan']
                    if len(_macvlan_is) > 0 and len(_vlan_is) > 0:
                        #
                        # treat secondary addrs: if have some in metadata not present on system , we have to plumb them
                        _have_to_be_added = set(interface.get('SECONDARY_ADDRS', [])).difference(_vlan_is[0].get('SECONDARY_ADDRS', []))
                        interface.update(_macvlan_is[0])
                        interface['VLAN'] = _vlan_is[0]['IFACE']
                        interface['IFACE'] = _macvlan_is[0]['LINK']
                        if _vlan_is[0].has('ADDR'):
                            _state = '-'
                        if _vlan_is[0].has('SECONDARY_ADDRS'):
                            interface['SECONDARY_ADDRS'] = _vlan_is[0]['SECONDARY_ADDRS']
                interface['CONFSTATE'] = _state
                #
                # clean up system list
                _all_from_system = [_i for _i in _all_from_system if _i['MAC'] != interface['MAC']]
            except ValueError as e:
                _logger.debug('Error while parsing [%s]: %s', str(interface), str(e))
            finally:
                if len(_have_to_be_added) > 0:
                    # this key will trigger configuration (see auto_config())
                    interface['MISSING_SECONDARY_IPS'] = list(_have_to_be_added)
                interfaces.append(interface)
        # now collect the one left on system
        for interface in _all_from_system:
            interface['CONFSTATE'] = 'DELETE'
            interfaces.append(interface)
        # final round for the excluded
        for interface in interfaces:
            if self._is_intf_excluded(interface):
                interface['CONFSTATE'] = 'EXCL'
            # NOTE(review): 'is_vf' is a lowercase key, unlike every other key
            # set here; presumably _intf_dict tolerates missing keys — confirm.
            if interface['is_vf'] and interface['CONFSTATE'] == 'DELETE':
                # revert this as '-' , as DELETE state means nothing for VFs
                interface['CONFSTATE'] = '-'
        return interfaces
def _compute_routing_table_name(self, interface_info):
"""
Compute the routing table name for a givne interface
return the name as str
"""
if self._metadata is None:
raise ValueError('no metadata avaialable')
if self._metadata['instance']['shape'].startswith('BM'):
return 'ort%svl%s' % (interface_info['NIC_I'], interface_info['VLTAG'])
return 'ort%s' % interface_info['IND']
def _auto_deconfig_intf_routing(self, intf_infos):
"""
Deconfigure interface routing
parameter:
intf_info: interface info as dict
keys: see VNICUTils.get_network_config
Raise:
Exception. if configuration failed
"""
# for namespaces the subnet and default routes will be auto deleted with the namespace
if not intf_infos.has('NS'):
_route_table_name = self._compute_routing_table_name(intf_infos)
# TODO: rename method to remove_ip_rules
# NetworkHelpers.remove_ip_addr_rules(_route_table_name)
NetworkHelpers.remove_ip_rules(_route_table_name)
NetworkHelpers.delete_route_table(_route_table_name)
    def _auto_config_intf_routing(self, net_namespace_info, intf_infos):
        """
        Configure interface routing.

        Parameters
        ----------
        net_namespace_info: dict or None
            Information about the namespace (or None if no namespace is used).
            keys:
                name : namespace name
                start_sshd: if True start sshd within the namespace
        intf_infos: dict
            Interface info, keys as in VNICUtils.get_network_config().

        Raises
        ------
        Exception
            If configuration failed.
        """
        _intf_to_use = intf_infos['IFACE']
        if self._metadata['instance']['shape'].startswith('BM') and intf_infos['VLTAG'] != "0":
            # in that case we operate on the VLAN tagged intf
            _intf_to_use = '%sv%s' % (intf_infos['IFACE'], intf_infos['VLTAG'])
        if net_namespace_info:
            # namespace case: add a default route inside the namespace
            _logger.debug("default route add")
            ret, out = NetworkHelpers.add_static_ip_route(
                'default', 'via', intf_infos['VIRTRT'], namespace=net_namespace_info['name'])
            if ret != 0:
                raise Exception("cannot add namespace %s default gateway %s: %s" %
                                (net_namespace_info['name'], intf_infos['VIRTRT'], out))
            _logger.debug("added namespace %s default gateway %s", net_namespace_info['name'], intf_infos['VIRTRT'])
            if net_namespace_info['start_sshd']:
                # optionally start an sshd inside the namespace for remote access
                ret = sudo_utils.call(['/usr/sbin/ip', 'netns', 'exec', net_namespace_info['name'], '/usr/sbin/sshd'])
                if ret != 0:
                    raise Exception("cannot start ssh daemon")
                _logger.debug('sshd daemon started')
        else:
            # no namespace: use a dedicated route table + source-based rule
            _route_table_name = self._compute_routing_table_name(intf_infos)
            NetworkHelpers.add_route_table(_route_table_name)
            _logger.debug("default route add")
            ret, out = NetworkHelpers.add_static_ip_route(
                'default', 'via', intf_infos['VIRTRT'], 'dev', _intf_to_use, 'table', _route_table_name)
            if ret != 0:
                raise Exception("cannot add default route via %s on %s to table %s" %
                                (intf_infos['VIRTRT'], _intf_to_use, _route_table_name))
            _logger.debug("added default route via %s dev %s table %s",
                          intf_infos['VIRTRT'], _intf_to_use, _route_table_name)
            # create source-based rule to use table
            ret, out = NetworkHelpers.add_static_ip_rule('from', intf_infos['ADDR'], 'lookup', _route_table_name)
            if ret != 0:
                raise Exception("Cannot add rule from %s use table %s" % (intf_infos['ADDR'], _route_table_name))
            _logger.debug("Added rule for routing from %s lookup %s with default via %s",
                          intf_infos['ADDR'], _route_table_name, intf_infos['VIRTRT'])
def _config_secondary_intf(self, intf_infos):
"""
Configures interface secodnary IPs
parameter:
intf_info: interface info as dict
keys: see VNICUtils.get_network_config
Raise:
Exception. if configuration failed
"""
_logger.debug('Configure secondary interfaces: %s', intf_infos)
_route_table_name = self._compute_routing_table_name(intf_infos)
_sec_addrs = []
if intf_infos.has('SECONDARY_ADDRS'):
_sec_addrs = intf_infos.get('SECONDARY_ADDRS')
for secondary_ip in intf_infos['MISSING_SECONDARY_IPS']:
_logger.debug("Adding secondary IP address %s to interface (or VLAN) %s", secondary_ip, intf_infos['IFACE'])
NetworkInterfaceSetupHelper(intf_infos).add_secondary_address(secondary_ip)
NetworkHelpers.add_route_table(_route_table_name)
ret, _ = NetworkHelpers.add_static_ip_rule('from', secondary_ip, 'lookup', _route_table_name)
if ret != 0:
raise Exception("Cannot add rule from %s use table %s" % (secondary_ip, _route_table_name))
_logger.debug("Added rule for routing from %s lookup %s with default via %s",
secondary_ip, _route_table_name, intf_infos['VIRTRT'])
def _auto_config_intf(net_namespace_info, intf_infos):
    """
    Configures an interface.

    Parameters
    ----------
    net_namespace_info: dict or None
        Information about the namespace (or None if no namespace is used).
        keys:
            name : namespace name
            start_sshd: if True start sshd within the namespace
    intf_infos: dict
        Interface info, keys as in VNICUtils.get_network_config().

    Raises
    ------
    Exception
        If configuration failed.
    """
    # make sure the link is administratively up before configuring it
    if intf_infos['STATE'] != 'up':
        _logger.debug('Bringing intf [%s] up ', intf_infos['IFACE'])
        if sudo_utils.call(['/usr/sbin/ip', 'link', 'set', 'dev', intf_infos['IFACE'], 'up']) != 0:
            raise Exception('Cannot bring interface up')
    if net_namespace_info is None:
        # plain setup, no namespace requested
        NetworkInterfaceSetupHelper(intf_infos).setup()
        return
    # namespace requested: create it on first use, then set up inside it
    if not NetworkHelpers.is_network_namespace_exists(net_namespace_info['name']):
        _logger.debug('creating namespace [%s]', net_namespace_info['name'])
        NetworkHelpers.create_network_namespace(net_namespace_info['name'])
    NetworkInterfaceSetupHelper(intf_infos, net_namespace_info['name']).setup()
def _auto_deconfig_intf(intf_infos):
    """
    Deconfigures an interface.

    Parameters
    ----------
    intf_infos: dict
        Interface info, keys as in VNICUtils.get_network_config().

    Raises
    ------
    Exception
        If deconfiguration failed.
    """
    in_namespace = intf_infos.has('NS')
    if in_namespace:
        # processes still running inside the namespace would keep it busy
        NetworkHelpers.kill_processes_in_namespace(intf_infos['NS'])
    # TODO EJANNET : LOOP on ('SECONDARY_ADDRS')
    #    -> NetworkInterfaceSetupHelper(intf_infos).remove_secondary_address()
    NetworkInterfaceSetupHelper(intf_infos).tear_down()
    if in_namespace:
        _logger.debug('deleting namespace [%s]', intf_infos['NS'])
        NetworkHelpers.destroy_network_namespace(intf_infos['NS'])
    # re-register the MAC with NetworkManager (counterpart of the
    # remove_mac_from_nm() call done at configuration time)
    NetworkHelpers.add_mac_to_nm(intf_infos['MAC'])
| 39.73348 | 136 | 0.560702 |
ace3e6578bc6e61681c68edca3abf4a8cc2388c9 | 2,604 | py | Python | a2ml/api/auger/config.py | deeplearninc/a2ml | f97dcbd973dfe083e41ffb2be724b84cc3bfbea5 | [
"Apache-2.0"
] | 7 | 2019-04-16T01:59:28.000Z | 2019-06-26T15:29:58.000Z | a2ml/api/auger/config.py | deeplearninc/a2ml | f97dcbd973dfe083e41ffb2be724b84cc3bfbea5 | [
"Apache-2.0"
] | 12 | 2019-06-10T17:44:31.000Z | 2019-06-21T12:56:21.000Z | a2ml/api/auger/config.py | deeplearninc/a2ml | f97dcbd973dfe083e41ffb2be724b84cc3bfbea5 | [
"Apache-2.0"
] | 1 | 2019-05-18T13:46:53.000Z | 2019-05-18T13:46:53.000Z | class AugerConfig(object):
def __init__(self, ctx):
super(AugerConfig, self).__init__()
self.ctx = ctx
def set_data_set(self, name, source=None, validation=False, user_name=None):
#TODO: add more providers later
if validation:
self.ctx.config.set('experiment/validation_dataset', name)
if self.ctx.use_auger_cloud() and 'azure' in self.ctx.get_providers():
self.ctx.config.set('experiment/validation_dataset', name, "azure")
else:
#print("set_data_set: %s"%self.ctx.use_auger_cloud())
self.ctx.config.set('dataset', name)
if user_name:
self.ctx.config.set('dataset_name', user_name)
self.ctx.config.set(f'experiments/{user_name}/dataset', self.ctx.config.get('dataset'))
if self.ctx.use_auger_cloud() and 'azure' in self.ctx.get_providers():
self.ctx.config.set('dataset', name, "azure")
self.ctx.config.write_all()
return self
def set_experiment(self, experiment_name, experiment_session_id):
self.ctx.config.set('experiment/name', experiment_name)
self.ctx.config.set('experiment/experiment_session_id', experiment_session_id)
if self.ctx.config.get('dataset_name'):
dataset_name = self.ctx.config.get('dataset_name')
self.ctx.config.set(f'experiments/{dataset_name}/experiment_name', experiment_name)
self.ctx.config.set(f'experiments/{dataset_name}/experiment_session_id', experiment_session_id)
self.ctx.config.set(f'experiments/{dataset_name}/dataset', self.ctx.config.get('dataset'))
self.ctx.config.write()
def _get_experiment_by_dataset(self):
dataset_name = self.ctx.config.get('dataset_name')
experiments = self.ctx.config.get('experiments', {})
return experiments.get(dataset_name, {})
def get_experiment(self):
return self._get_experiment_by_dataset().get('experiment_name',
self.ctx.config.get('experiment/name'))
def get_experiment_session(self):
return self._get_experiment_by_dataset().get('experiment_session_id',
self.ctx.config.get('experiment/experiment_session_id'))
def get_dataset(self):
return self._get_experiment_by_dataset().get('dataset',
self.ctx.config.get('dataset'))
def set_project(self, project_name):
self.ctx.config.set('name', project_name)
self.ctx.config.write()
return self
| 43.4 | 107 | 0.643625 |
ace3e6f9c8a99f681030e19a2d6e9009f28702ab | 3,628 | py | Python | slot_language/object_slot/aug_model.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | slot_language/object_slot/aug_model.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | slot_language/object_slot/aug_model.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
from torch.nn import functional as F
import torchvision.transforms.functional as TF
from obj_model import ObjSlotAttentionModel
class ObjAugSlotAttentionModel(nn.Module):
    """Wrapper that trains an ObjSlotAttentionModel with augmentation
    consistency: each batch is duplicated (original + flipped/shuffled view)
    and an equivariance loss ties the two sets of predicted slot masks.
    """

    def __init__(self, model: ObjSlotAttentionModel, eps: float = 1e-6):
        super().__init__()
        # wrapped base model
        self.model = model
        # small constant added to masks before normalization/log to avoid log(0)
        self.eps = eps

    def forward_test(self, data):
        """Plain forward on the un-augmented inputs (eval mode)."""
        return self.model(
            dict(img=data['img'], text=data['text'], padding=data['padding']))

    def forward(self, data):
        """Forward function.

        Args:
            data (dict): Input data dict containing the following items:
                - img/flipped_img: One frame and its (potentially) flipped version
                - is_flipped: Boolean
                - text: Text description corresponding to img
                - padding: Pad 0 for background slots, [B, num_slots]
                - shuffled_text: Shuffled text, used for `flip_img`
                - shuffled_idx: Order of the shuffled text, [B, num_slots]
                - is_shuffled: Boolean
        """
        if not self.training:
            return self.forward_test(data)
        # at least one augmentation is applied
        assert data['is_flipped'][0].item() or data['is_shuffled'][0].item()
        # stack original and augmented views along the batch dim -> 2*B samples
        x = dict(
            img=torch.cat([data['img'], data['flipped_img']], dim=0),
            text=torch.cat([data['text'], data['shuffled_text']], dim=0),
            padding=data['padding'].repeat(2, 1))
        recon_combined, recons, masks, slots = self.model(x)
        return recon_combined, recons, masks, slots

    def loss_function(self, input):
        """Calculate loss.

        Three loss components:
            - MSE reconstruction loss
            - Equivariance loss (KL between masks of the two augmented views)
            - Entropy loss (optional, encourages peaked slot masks)
        """
        recon_combined, recons, masks, slots = self.forward(input)
        if not self.training:
            recon_loss = F.mse_loss(recon_combined, input['img'])
            return {
                "recon_loss": recon_loss,
            }
        # training: reconstruction target is the doubled batch (img + flipped_img)
        recon_loss = F.mse_loss(
            recon_combined,
            torch.cat([input['img'], input['flipped_img']], dim=0))
        loss_dict = {
            "recon_loss": recon_loss,
        }
        masks = masks[:, :, 0]  # [2*B, num_slots, H, W]
        # normalize over slots so masks are a per-pixel probability distribution
        masks = masks + self.eps
        masks = masks / masks.sum(dim=1, keepdim=True)
        if self.model.use_entropy_loss:
            entropy_loss = (-masks * torch.log(masks)).sum(1).mean()
            loss_dict['entropy'] = entropy_loss
        # Equivariance loss
        padding = input['padding']  # [B, num_slots]
        bs = padding.shape[0]
        # foreground (object) slots are marked with padding == 1
        obj_mask = (padding == 1)
        masks1, masks2 = masks[:bs], masks[bs:]  # [B, num_slots, H, W]
        if input['is_flipped'][0].item():
            # undo the horizontal flip so the two mask sets are aligned
            masks2 = TF.hflip(masks2)
        # we only penalize foreground obj masks
        if not input['is_shuffled'][0].item():
            masks1, masks2 = masks1[obj_mask], masks2[obj_mask]  # [M, H, W]
        else:
            # text order was shuffled: reorder view-1 object slots by
            # shuffled_idx so they line up with view-2 slots
            shuffled_idx = input['shuffled_idx'].long()  # [B, num_slots]
            masks1 = torch.cat([
                masks1[i, obj_mask[i]][shuffled_idx[i, obj_mask[i]]]
                for i in range(bs)
            ],
                               dim=0)
            masks2 = masks2[obj_mask]
        # masks are probability tensors, however torch.kl_div requires
        # log-probabilities as its first (input) argument
        equivariance_loss = F.kl_div(torch.log(masks1), masks2) + \
            F.kl_div(torch.log(masks2), masks1)
        loss_dict['equivariance_loss'] = equivariance_loss
        return loss_dict
| 37.402062 | 82 | 0.568633 |
ace3e772c32e16ad9f4950169ad842a6a3bdb64b | 5,361 | py | Python | bokeh-app/main.py | asartori86/interactive_covid_plot | 523212438e66d70c619dbd1ee9d0bd2d539d93e6 | [
"BSD-3-Clause"
] | null | null | null | bokeh-app/main.py | asartori86/interactive_covid_plot | 523212438e66d70c619dbd1ee9d0bd2d539d93e6 | [
"BSD-3-Clause"
] | null | null | null | bokeh-app/main.py | asartori86/interactive_covid_plot | 523212438e66d70c619dbd1ee9d0bd2d539d93e6 | [
"BSD-3-Clause"
] | null | null | null | from bokeh.plotting import figure, output_notebook, show, curdoc
from bokeh.models import CDSView, GroupFilter, ColumnDataSource, CategoricalColorMapper
from bokeh.transform import factor_cmap
from bokeh.palettes import Paired, Spectral3, GnBu3, Turbo256, viridis, turbo, Category20,cividis,magma,plasma,Category10_4,Category10_9,Category10,Set1
from bokeh.models import HoverTool
from bokeh.models.widgets import TextInput
from bokeh.layouts import column, row
import itertools
import numpy as np
import pandas as pd
def loc_eval(x, b):
    """Evaluate the polynomial with coefficients *b* (lowest order first) at *x*."""
    return sum(coef * x ** power for power, coef in enumerate(b))
def loess(yvals, alpha=0.7, poly_degree=1):
    """Fit a LOESS (locally weighted polynomial regression) curve to a series.

    Parameters
    ----------
    yvals : pandas.Series
        Observations; x coordinates are the implicit indices 0..n-1.
    alpha : float
        Smoothing span: fraction of points used in each local fit
        (values > 1 use all points).
    poly_degree : int
        Degree of the local polynomial.

    Returns
    -------
    pandas.DataFrame
        Columns 'v' (n+1 evenly spaced evaluation points spanning the data
        range plus half an interval on each side) and 'g' (smoothed values).

    Notes
    -----
    Rewritten to vectorize the per-point weight computation and to build the
    result in one shot: the original concatenated a one-row DataFrame per
    evaluation point (quadratic) and carried several unused intermediates.
    """
    yvals = yvals.to_numpy()
    xvals = np.arange(len(yvals))
    n = len(xvals)
    m = n + 1
    # number of neighbours participating in each local fit
    q = int(np.floor(n * alpha) if alpha <= 1.0 else n)
    avg_interval = (max(xvals) - min(xvals)) / len(xvals)
    v_lb = min(xvals) - 0.5 * avg_interval
    v_ub = max(xvals) + 0.5 * avg_interval
    v_grid = np.linspace(start=v_lb, stop=v_ub, num=m)
    # design matrix [1, x, x^2, ...]
    X = np.vstack([xvals ** j for j in range(poly_degree + 1)]).T
    local_ests = []
    for iterval in v_grid:
        raw_dists = np.abs(xvals - iterval)
        # scale by the q-th smallest distance, then apply tricube weights
        scale_fact = np.sort(raw_dists)[q - 1]
        scaled = raw_dists / scale_fact
        weights = np.where(scaled <= 1.0, (1.0 - scaled ** 3) ** 3, 0.0)
        W = np.diag(weights)
        # weighted least-squares fit of the local polynomial
        b = np.linalg.inv(X.T @ W @ X) @ (X.T @ W @ yvals)
        # evaluate the fitted polynomial at the evaluation point
        local_ests.append(sum(coef * iterval ** p for p, coef in enumerate(b)))
    return pd.DataFrame({'v': v_grid, 'g': local_ests})
def compute_new_cases(df,city):
import numpy as np
totale = df.loc[df.denominazione_provincia == city,'totale_casi'].to_numpy()
new = np.zeros_like(totale)
new[1:] = np.ediff1d(totale)
df.loc[df.denominazione_provincia == city, 'new'] = new
# Download the per-province COVID-19 time series published by the Italian
# Protezione Civile; parse the 'data' column as datetimes.
df = pd.read_csv('https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv',parse_dates=['data'])
# Pre-compute the daily new-cases column for every province.
for c in df.denominazione_provincia.unique():
    compute_new_cases(df,c)
# Text box where the user types a space-separated list of city names.
tw = TextInput(title='List of cities (e.g., trieste treviso milano roma)', value='')
def make_tot_fig():
    """Create an empty datetime figure for the cumulative-cases plot."""
    hover = HoverTool(
        tooltips=[('City', '@denominazione_provincia'),
                  ('Data', '@data{%F}'),
                  ('Totale casi', '@totale_casi')],
        formatters={'@data': 'datetime'})
    # hover.mode = 'vline'
    return figure(x_axis_type='datetime', x_axis_label='data m/d',
                  y_axis_label='totale casi', tools=[hover, 'crosshair'])
def make_plot_tot(tot_fig, df, cities):
    """Draw the cumulative-cases scatter for *cities* on *tot_fig*."""
    source = ColumnDataSource(data=df[df.denominazione_provincia.isin(cities)])
    color_map = CategoricalColorMapper(factors=cities, palette=Category20[20])
    tot_fig.circle(x='data', y='totale_casi', source=source,
                   color={'field': 'denominazione_provincia', 'transform': color_map},
                   alpha=0.99, size=4, legend_field='denominazione_provincia',
                   )
    tot_fig.legend.location = 'top_left'
def make_new_cases():
    """Create an empty datetime figure for the daily new-cases plot."""
    return figure(x_axis_type='datetime', x_axis_label='data m/d',
                  y_axis_label='nuovi casi')
def make_plot_newc(newc, df, cities):
    """Draw daily new-case markers plus a LOESS trend line for each city."""
    color_map = CategoricalColorMapper(factors=cities, palette=Category20[20])
    newc.cross(x='data', y='new',
               source=df.loc[df.denominazione_provincia.isin(cities), :],
               color={'field': 'denominazione_provincia', 'transform': color_map},
               alpha=0.99, size=4, legend_field='denominazione_provincia',
               )
    newc.legend.location = 'top_left'
    for name, color in zip(cities, itertools.cycle(Category20[20])):
        # smooth each city's series; loess returns n+1 points, skip the first
        # so the trend aligns with the n observation dates
        trend = loess(df.loc[df.denominazione_provincia == name, 'new'],
                      alpha=0.9, poly_degree=1)
        newc.line(x=df.loc[df.denominazione_provincia == name, 'data'].to_numpy(),
                  y=trend['g'].to_numpy()[1:], color=color,
                  legend_label='Trend - ' + name)
# Overall page layout: the text input on top, the two plots side by side below.
layout = column(tw,row(make_tot_fig(),make_new_cases()))
def callback(attr, old, new):
    """Rebuild both plots whenever the city list in the text box changes."""
    cities = [city.capitalize() for city in new.lower().split(' ')]
    subset = df[df.denominazione_provincia.isin(cities)]
    tot_fig = make_tot_fig()
    new_fig = make_new_cases()
    make_plot_tot(tot_fig, subset, cities)
    make_plot_newc(new_fig, subset, cities)
    # replace both plots in the bottom row of the layout
    layout.children[1].children[0:2] = [tot_fig, new_fig]
# Rebuild the plots every time the text-input value changes.
tw.on_change('value',callback)
# curdoc().add_root(tw)
# Register the layout with the Bokeh document served by `bokeh serve`.
curdoc().add_root(layout)
# curdoc().add_root(pn)
| 34.365385 | 152 | 0.633277 |
ace3e7ed6ef79d118145413d6d801a9fc636e4e7 | 7,158 | bzl | Python | apple/internal/macos_binary_support.bzl | tnek/rules_apple | 739aa74febeb95902dded57f7a49c85c1f153756 | [
"Apache-2.0"
] | 313 | 2017-03-29T21:47:08.000Z | 2022-03-29T03:09:50.000Z | apple/internal/macos_binary_support.bzl | tnek/rules_apple | 739aa74febeb95902dded57f7a49c85c1f153756 | [
"Apache-2.0"
] | 786 | 2017-03-30T16:15:59.000Z | 2022-03-31T19:58:05.000Z | apple/internal/macos_binary_support.bzl | tnek/rules_apple | 739aa74febeb95902dded57f7a49c85c1f153756 | [
"Apache-2.0"
] | 172 | 2017-04-24T01:55:24.000Z | 2022-03-25T19:23:31.000Z | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal helper definitions used by macOS command line rules."""
load(
"@build_bazel_rules_apple//apple/internal:apple_product_type.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/internal:bundling_support.bzl",
"bundling_support",
)
load(
"@build_bazel_rules_apple//apple/internal:intermediates.bzl",
"intermediates",
)
load(
"@build_bazel_rules_apple//apple/internal:linking_support.bzl",
"linking_support",
)
load(
"@build_bazel_rules_apple//apple/internal:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple/internal:resource_actions.bzl",
"resource_actions",
)
load(
"@build_bazel_rules_apple//apple/internal:rule_factory.bzl",
"rule_factory",
)
load(
"@build_bazel_rules_apple//apple/internal:rule_support.bzl",
"rule_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleVersionInfo",
"AppleSupportToolchainInfo",
)
load(
"@bazel_skylib//lib:dicts.bzl",
"dicts",
)
def _macos_binary_infoplist_impl(ctx):
    """Implementation of the internal `macos_binary_infoplist` rule.

    This rule is an internal implementation detail of
    `macos_command_line_application` and should not be used directly by clients.
    It merges Info.plists as would occur for a bundle but then propagates a
    provider with the necessary linkopts to embed the plist in a binary.

    Args:
      ctx: The rule context.

    Returns:
      A list containing the provider (from
      `linking_support.sectcreate_objc_provider`) that should be propagated to
      a binary that should have this plist embedded in its
      `__TEXT,__info_plist` section.
    """
    actions = ctx.actions
    bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)
    bundle_id = ctx.attr.bundle_id
    executable_name = bundling_support.executable_name(ctx)
    rule_descriptor = rule_support.rule_descriptor(ctx)
    rule_label = ctx.label
    platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx)

    infoplists = ctx.files.infoplists
    if ctx.attr.version and AppleBundleVersionInfo in ctx.attr.version:
        version_found = True
    else:
        version_found = False

    # the macro that instantiates this rule guarantees at least one source of
    # plist content; anything else is a programming error
    if not bundle_id and not infoplists and not version_found:
        fail("Internal error: at least one of bundle_id, infoplists, or version " +
             "should have been provided")

    merged_infoplist = intermediates.file(
        actions = actions,
        target_name = rule_label.name,
        output_discriminator = None,
        file_name = "Info.plist",
    )

    resource_actions.merge_root_infoplists(
        actions = actions,
        bundle_extension = bundle_extension,
        bundle_id = bundle_id,
        bundle_name = bundle_name,
        executable_name = executable_name,
        environment_plist = ctx.file._environment_plist,
        include_executable_name = False,
        input_plists = infoplists,
        launch_storyboard = None,
        output_discriminator = None,
        output_pkginfo = None,
        output_plist = merged_infoplist,
        platform_prerequisites = platform_prerequisites,
        resolved_plisttool = ctx.attr._toolchain[AppleSupportToolchainInfo].resolved_plisttool,
        rule_descriptor = rule_descriptor,
        rule_label = rule_label,
        version = ctx.attr.version,
    )

    return [
        linking_support.sectcreate_objc_provider(
            "__TEXT",
            "__info_plist",
            merged_infoplist,
        ),
    ]
# Internal rule: merges the given Info.plists for a macOS command line tool and
# propagates linker options that embed the merged plist in the binary's
# `__TEXT,__info_plist` section. See `_macos_binary_infoplist_impl`.
macos_binary_infoplist = rule(
    implementation = _macos_binary_infoplist_impl,
    attrs = dicts.add(
        rule_factory.common_tool_attributes,
        {
            "bundle_id": attr.string(mandatory = False),
            "infoplists": attr.label_list(
                allow_files = [".plist"],
                mandatory = False,
                allow_empty = True,
            ),
            "minimum_deployment_os_version": attr.string(mandatory = False),
            "minimum_os_version": attr.string(mandatory = False),
            "platform_type": attr.string(
                default = str(apple_common.platform_type.macos),
            ),
            "_environment_plist": attr.label(
                allow_single_file = True,
                default = "@build_bazel_rules_apple//apple/internal:environment_plist_macos",
            ),
            "version": attr.label(providers = [[AppleBundleVersionInfo]]),
            "_product_type": attr.string(default = apple_product_type.tool),
        },
    ),
    fragments = ["apple", "cpp", "objc"],
)
def _macos_command_line_launchdplist_impl(ctx):
    """Merges the launchd plists and embeds them in __TEXT,__launchd_plist."""
    plists_to_merge = ctx.files.launchdplists
    if not plists_to_merge:
        fail("Internal error: launchdplists should have been provided")

    label = ctx.label
    bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)

    # Intermediate file that receives the merged launchd plist.
    merged_plist = intermediates.file(
        actions = ctx.actions,
        target_name = label.name,
        output_discriminator = None,
        file_name = "Launchd.plist",
    )
    resource_actions.merge_resource_infoplists(
        actions = ctx.actions,
        bundle_id = None,
        bundle_name_with_extension = bundle_name + bundle_extension,
        input_files = plists_to_merge,
        output_discriminator = None,
        output_plist = merged_plist,
        platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx),
        resolved_plisttool = ctx.attr._toolchain[AppleSupportToolchainInfo].resolved_plisttool,
        rule_label = label,
    )

    # Hand the merged plist to the linker as a binary section.
    return [
        linking_support.sectcreate_objc_provider(
            "__TEXT",
            "__launchd_plist",
            merged_plist,
        ),
    ]
# Rule that merges the launchd property lists of a command-line (tool) target;
# its implementation returns a provider that embeds the result in the binary's
# __TEXT,__launchd_plist section.
macos_command_line_launchdplist = rule(
    implementation = _macos_command_line_launchdplist_impl,
    attrs = dicts.add(
        rule_factory.common_tool_attributes,
        {
            # launchd plists to merge; the implementation fails if none are
            # provided.
            "launchdplists": attr.label_list(
                allow_files = [".plist"],
                mandatory = False,
            ),
            "minimum_deployment_os_version": attr.string(mandatory = False),
            "minimum_os_version": attr.string(mandatory = False),
            # Tools are macOS-only, so the platform type defaults to macos.
            "platform_type": attr.string(
                default = str(apple_common.platform_type.macos),
            ),
            "_product_type": attr.string(default = apple_product_type.tool),
        },
    ),
    fragments = ["apple", "cpp", "objc"],
)
| 34.085714 | 95 | 0.677144 |
ace3e996b58e7ce6565930b57210f3577767b5f5 | 9,534 | py | Python | tests/subprocess2_test.py | fanbojie/depot_tools | 355e97e300e8baceae8353287ad59b915dbb8196 | [
"BSD-3-Clause"
] | 1 | 2019-08-17T04:07:01.000Z | 2019-08-17T04:07:01.000Z | tests/subprocess2_test.py | fanbojie/depot_tools | 355e97e300e8baceae8353287ad59b915dbb8196 | [
"BSD-3-Clause"
] | null | null | null | tests/subprocess2_test.py | fanbojie/depot_tools | 355e97e300e8baceae8353287ad59b915dbb8196 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for subprocess2.py."""
import os
import sys
import unittest
# Make the depot_tools root importable so subprocess2 / third_party resolve.
DEPOT_TOOLS = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, DEPOT_TOOLS)
import subprocess
import subprocess2
from third_party import mock
# Helper script the smoke tests shell out to; it prints to stdout/stderr
# and/or reads stdin depending on the flags it receives.
TEST_COMMAND = [
    sys.executable,
    os.path.join(DEPOT_TOOLS, 'testing_support', 'subprocess2_test_script.py'),
]
class DefaultsTest(unittest.TestCase):
  """Checks the default arguments subprocess2 fills in before delegating.

  Each test mocks the layer below and asserts on the arguments it received,
  so no actual process is spawned.
  """

  @mock.patch('subprocess2.communicate')
  def test_check_call_defaults(self, mockCommunicate):
    mockCommunicate.return_value = (('stdout', 'stderr'), 0)
    self.assertEqual(
        ('stdout', 'stderr'), subprocess2.check_call_out(['foo'], a=True))
    mockCommunicate.assert_called_with(['foo'], a=True)

  @mock.patch('subprocess2.communicate')
  def test_capture_defaults(self, mockCommunicate):
    # capture() must default to swallowing stdin and piping stdout.
    mockCommunicate.return_value = (('stdout', 'stderr'), 0)
    self.assertEqual(
        'stdout', subprocess2.capture(['foo'], a=True))
    mockCommunicate.assert_called_with(
        ['foo'], a=True, stdin=subprocess2.VOID_INPUT, stdout=subprocess2.PIPE)

  @mock.patch('subprocess2.Popen')
  def test_communicate_defaults(self, mockPopen):
    mockPopen().communicate.return_value = ('bar', 'baz')
    mockPopen().returncode = -8
    self.assertEqual(
        (('bar', 'baz'), -8), subprocess2.communicate(['foo'], a=True))
    mockPopen.assert_called_with(['foo'], a=True)

  @mock.patch('os.environ', {})
  @mock.patch('subprocess.Popen.__init__')
  def test_Popen_defaults(self, mockPopen):
    # shell must default to True on Windows and False everywhere else.
    with mock.patch('sys.platform', 'win32'):
      subprocess2.Popen(['foo'], a=True)
      mockPopen.assert_called_with(['foo'], a=True, shell=True)
    with mock.patch('sys.platform', 'non-win32'):
      subprocess2.Popen(['foo'], a=True)
      mockPopen.assert_called_with(['foo'], a=True, shell=False)

  def test_get_english_env(self):
    # On Windows the environment is left untouched (returns None).
    with mock.patch('sys.platform', 'win32'):
      self.assertIsNone(subprocess2.get_english_env({}))
    with mock.patch('sys.platform', 'non-win32'):
      # Empty or already-English locales need no override.
      self.assertIsNone(subprocess2.get_english_env({}))
      self.assertIsNone(
          subprocess2.get_english_env({'LANG': 'en_XX', 'LANGUAGE': 'en_YY'}))
      # Non-English locales are rewritten to en_US.UTF-8.
      self.assertEqual(
          {'LANG': 'en_US.UTF-8', 'LANGUAGE': 'en_US.UTF-8'},
          subprocess2.get_english_env({'LANG': 'bar', 'LANGUAGE': 'baz'}))

  @mock.patch('subprocess2.communicate')
  def test_check_output_defaults(self, mockCommunicate):
    mockCommunicate.return_value = (('stdout', 'stderr'), 0)
    self.assertEqual('stdout', subprocess2.check_output(['foo'], a=True))
    mockCommunicate.assert_called_with(
        ['foo'], a=True, stdin=subprocess2.VOID_INPUT, stdout=subprocess2.PIPE)
def _run_test(with_subprocess=True):
  """Decorator factory that runs the wrapped test in 12 combinations:
  - With universal_newlines=True and False.
  - With LF, CR, and CRLF output.
  - With subprocess and subprocess2 (skip subprocess if with_subprocess is
    False).
  """
  modules = (subprocess2, subprocess) if with_subprocess else (subprocess2,)
  identity = lambda text: text
  as_lf_bytes = lambda text: text.encode()
  as_cr_bytes = lambda text: text.replace('\n', '\r').encode()
  as_crlf_bytes = lambda text: text.replace('\n', '\r\n').encode()
  # (converter, extra flags, universal_newlines), in the exact order the
  # original hand-rolled calls used.
  variants = (
      (as_lf_bytes, [], False),
      (as_cr_bytes, ['--cr'], False),
      (as_crlf_bytes, ['--crlf'], False),
      (identity, [], True),
      (identity, ['--cr'], True),
      (identity, ['--crlf'], True),
  )
  def decorator(test):
    def wrapped(self):
      for module in modules:
        for convert, extra_flags, universal in variants:
          test(self, convert, TEST_COMMAND + extra_flags, universal, module)
    return wrapped
  return decorator
class SmokeTests(unittest.TestCase):
  # Regression tests to ensure that subprocess and subprocess2 have the same
  # behavior.  These actually spawn the helper script in TEST_COMMAND.
  def _check_res(self, res, stdout, stderr, returncode):
    """Asserts that res == ((stdout, stderr), returncode)."""
    (out, err), code = res
    self.assertEqual(stdout, out)
    self.assertEqual(stderr, err)
    self.assertEqual(returncode, code)

  def _check_exception(self, subp, e, stdout, stderr, returncode):
    """On exception, look if the exception members are set correctly."""
    self.assertEqual(returncode, e.returncode)
    if subp is subprocess2 or sys.version_info.major == 3:
      self.assertEqual(stdout, e.stdout)
      self.assertEqual(stderr, e.stderr)
    else:
      # Python 2's subprocess never saves the output on the exception.
      self.assertFalse(hasattr(e, 'stdout'))
      self.assertFalse(hasattr(e, 'stderr'))

  def test_check_output_no_stdout(self):
    # check_output() rejects an explicit stdout in both implementations.
    for subp in (subprocess, subprocess2):
      with self.assertRaises(ValueError):
        subp.check_output(TEST_COMMAND, stdout=subp.PIPE)

  @_run_test()
  def test_check_output_throw_stdout(self, c, cmd, un, subp):
    with self.assertRaises(subp.CalledProcessError) as e:
      subp.check_output(
          cmd + ['--fail', '--stdout'], universal_newlines=un)
    self._check_exception(subp, e.exception, c('A\nBB\nCCC\n'), None, 64)

  @_run_test()
  def test_check_output_throw_no_stderr(self, c, cmd, un, subp):
    # stderr is not piped here, so nothing is captured for it.
    with self.assertRaises(subp.CalledProcessError) as e:
      subp.check_output(
          cmd + ['--fail', '--stderr'], universal_newlines=un)
    self._check_exception(subp, e.exception, c(''), None, 64)

  @_run_test()
  def test_check_output_throw_stderr(self, c, cmd, un, subp):
    with self.assertRaises(subp.CalledProcessError) as e:
      subp.check_output(
          cmd + ['--fail', '--stderr'],
          stderr=subp.PIPE,
          universal_newlines=un)
    self._check_exception(subp, e.exception, c(''), c('a\nbb\nccc\n'), 64)

  @_run_test()
  def test_check_output_throw_stderr_stdout(self, c, cmd, un, subp):
    # stderr redirected into the captured stdout stream.
    with self.assertRaises(subp.CalledProcessError) as e:
      subp.check_output(
          cmd + ['--fail', '--stderr'],
          stderr=subp.STDOUT,
          universal_newlines=un)
    self._check_exception(subp, e.exception, c('a\nbb\nccc\n'), None, 64)

  def test_check_call_throw(self):
    for subp in (subprocess, subprocess2):
      with self.assertRaises(subp.CalledProcessError) as e:
        subp.check_call(TEST_COMMAND + ['--fail', '--stderr'])
      self._check_exception(subp, e.exception, None, None, 64)

  @_run_test()
  def test_redirect_stderr_to_stdout_pipe(self, c, cmd, un, subp):
    # stderr output into stdout.
    proc = subp.Popen(
        cmd + ['--stderr'],
        stdout=subp.PIPE,
        stderr=subp.STDOUT,
        universal_newlines=un)
    res = proc.communicate(), proc.returncode
    self._check_res(res, c('a\nbb\nccc\n'), None, 0)

  @_run_test()
  def test_redirect_stderr_to_stdout(self, c, cmd, un, subp):
    # stderr output into stdout but stdout is not piped.
    proc = subp.Popen(
        cmd + ['--stderr'], stderr=subprocess2.STDOUT, universal_newlines=un)
    res = proc.communicate(), proc.returncode
    self._check_res(res, None, None, 0)

  @_run_test()
  def test_stderr(self, c, cmd, un, subp):
    # Ignores the generated cmd; runs a platform-specific failing command and
    # checks both modules surface identical results.
    cmd = ['expr', '1', '/', '0']
    if sys.platform == 'win32':
      cmd = ['cmd.exe', '/c', 'exit', '1']
    p1 = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=False)
    p2 = subprocess2.Popen(cmd, stderr=subprocess.PIPE, shell=False)
    r1 = p1.communicate()
    r2 = p2.communicate()
    self.assertEqual(r1, r2)

  @_run_test(with_subprocess=False)
  def test_stdin(self, c, cmd, un, subp):
    stdin = c('0123456789')
    res = subprocess2.communicate(
        cmd + ['--read'],
        stdin=stdin,
        universal_newlines=un)
    self._check_res(res, None, None, 10)

  @_run_test(with_subprocess=False)
  def test_stdin_empty(self, c, cmd, un, subp):
    stdin = c('')
    res = subprocess2.communicate(
        cmd + ['--read'],
        stdin=stdin,
        universal_newlines=un)
    self._check_res(res, None, None, 0)

  def test_stdin_void(self):
    res = subprocess2.communicate(
        TEST_COMMAND + ['--read'],
        stdin=subprocess2.VOID_INPUT)
    self._check_res(res, None, None, 0)

  @_run_test(with_subprocess=False)
  def test_stdin_void_stdout(self, c, cmd, un, subp):
    # Make sure a mix of subprocess2.VOID_INPUT and subprocess2.PIPE works.
    res = subprocess2.communicate(
        cmd + ['--stdout', '--read'],
        stdin=subprocess2.VOID_INPUT,
        stdout=subprocess2.PIPE,
        universal_newlines=un,
        shell=False)
    self._check_res(res, c('A\nBB\nCCC\n'), None, 0)

  @_run_test(with_subprocess=False)
  def test_stdout_void(self, c, cmd, un, subp):
    res = subprocess2.communicate(
        cmd + ['--stdout', '--stderr'],
        stdout=subprocess2.VOID,
        stderr=subprocess2.PIPE,
        universal_newlines=un)
    self._check_res(res, None, c('a\nbb\nccc\n'), 0)

  @_run_test(with_subprocess=False)
  def test_stderr_void(self, c, cmd, un, subp):
    res = subprocess2.communicate(
        cmd + ['--stdout', '--stderr'],
        stdout=subprocess2.PIPE,
        stderr=subprocess2.VOID,
        universal_newlines=un)
    self._check_res(res, c('A\nBB\nCCC\n'), None, 0)

  @_run_test(with_subprocess=False)
  def test_stdout_void_stderr_redirect(self, c, cmd, un, subp):
    res = subprocess2.communicate(
        cmd + ['--stdout', '--stderr'],
        stdout=subprocess2.VOID,
        stderr=subprocess2.STDOUT,
        universal_newlines=un)
    self._check_res(res, None, None, 0)
if __name__ == '__main__':
  # Allow running this file directly: python subprocess2_test.py
  unittest.main()
| 35.707865 | 79 | 0.665723 |
ace3e99a53daebf72f6456e5230c318bdbc7ae3f | 806 | py | Python | apps/rutinator/dashboard/views.py | mariusaarsnes/onlineweb4 | 3495321dabfd7a7236e6d841b004e9f855b6f30e | [
"MIT"
] | null | null | null | apps/rutinator/dashboard/views.py | mariusaarsnes/onlineweb4 | 3495321dabfd7a7236e6d841b004e9f855b6f30e | [
"MIT"
] | null | null | null | apps/rutinator/dashboard/views.py | mariusaarsnes/onlineweb4 | 3495321dabfd7a7236e6d841b004e9f855b6f30e | [
"MIT"
] | null | null | null | from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView, UpdateView
from apps.dashboard.tools import DashboardMixin
from apps.rutinator.dashboard.forms import NewTaskForm
from apps.rutinator.models import Task
class TaskListView(DashboardMixin, ListView):
    # Dashboard index page listing every Task.
    model = Task
    # NOTE(review): redundant with ``model`` above — ListView already derives
    # Task.objects.all() when no queryset is set; kept to avoid changing the
    # class's attributes.
    queryset = Task.objects.all()
    template_name = "rutinator/dashboard/index.html"
class CreateTaskView(DashboardMixin, CreateView):
    # Creates a new Task via NewTaskForm, then redirects to the task list.
    model = Task
    form_class = NewTaskForm
    template_name = 'rutinator/dashboard/create.html'
    success_url = reverse_lazy('dashboard:task_view')
class EditTaskView(DashboardMixin, UpdateView):
    # Edits an existing Task via NewTaskForm, then redirects to the task list.
    model = Task
    form_class = NewTaskForm
    template_name = 'rutinator/dashboard/edit.html'
    success_url = reverse_lazy('dashboard:task_view')
| 29.851852 | 65 | 0.777916 |
ace3e9d46a93cb484e93f52e3e6456fee2188e41 | 3,717 | py | Python | torch2trt/converters/GroupNorm.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | torch2trt/converters/GroupNorm.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | torch2trt/converters/GroupNorm.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | from torch2trt.torch2trt import *
from torch2trt.plugins import *
@tensorrt_converter('torch.nn.GroupNorm.forward')
def convert_GroupNorm(ctx):
    """Map torch.nn.GroupNorm.forward onto a TensorRT group-norm plugin layer."""
    gn_module = ctx.method_args[0]
    torch_input = ctx.method_args[1]
    trt_input = trt_(ctx.network, torch_input)
    torch_output = ctx.method_return

    # Learned affine parameters, copied to host-side numpy arrays for the
    # plugin constructor.
    weight_np = gn_module.weight.detach().cpu().numpy()
    bias_np = gn_module.bias.detach().cpu().numpy()

    groupnorm_plugin = create_groupnorm_plugin(
        "groupnorm_" + str(id(gn_module)),
        num_groups=gn_module.num_groups,
        num_channels=gn_module.num_channels,
        W=weight_np,
        B=bias_np,
        eps=gn_module.eps,
    )
    plugin_layer = ctx.network.add_plugin_v2(
        inputs=[trt_input], plugin=groupnorm_plugin)
    torch_output._trt = plugin_layer.get_output(0)
# @tensorrt_converter('torch.nn.GroupNorm.forward')
# def convert_GroupNorm(ctx):
# module = ctx.method_args[0]
# input = ctx.method_args[1]
# input_trt = trt_(ctx.network, input)
# output = ctx.method_return
# num_channels = module.num_channels
# num_groups = module.num_groups
# weight = module.weight.detach().cpu().numpy()
# bias = module.bias.detach().cpu().numpy()
# eps = module.eps
# input_shape_trt = ctx.network.add_shape(input_trt).get_output(0)
# input_batch_trt = ctx.network.add_slice(input_shape_trt, [0], [1], [1]).get_output(0)
# input_channel_trt = ctx.network.add_slice(input_shape_trt, [1], [1], [1]).get_output(0)
# input_hw_trt = ctx.network.add_slice(input_shape_trt, [2], [2], [1]).get_output(0)
# group_length = num_channels//num_groups
# num_groups_trt = trt_(ctx.network, torch.tensor([num_groups],dtype=torch.int32).to(input.device))
# group_length_trt = trt_(ctx.network, torch.tensor([group_length],dtype=torch.int32).to(input.device))
# new_shape_trt = ctx.network.add_concatenation([input_batch_trt, num_groups_trt, group_length_trt, input_hw_trt]).get_output(0)
# layer = ctx.network.add_shuffle(input_trt)
# layer.set_input(1, new_shape_trt)
# new_input_trt = layer.get_output(0)
# group_trts = []
# eps_np = np.array([eps], dtype=np.float32)
# keep_dims = True
# reduce_axes = torch_dim_to_trt_axes(tuple(range(2,5)))
# mean_trt = ctx.network.add_reduce(new_input_trt, trt.ReduceOperation.AVG, reduce_axes, keep_dims).get_output(0)
# # compute variance over spatial (include eps, to reduce layer count)
# delta_trt = ctx.network.add_elementwise(new_input_trt, mean_trt, trt.ElementWiseOperation.SUB).get_output(0)
# var_trt = ctx.network.add_scale(delta_trt, trt.ScaleMode.UNIFORM, np.zeros_like(eps_np), np.ones_like(eps_np), 2 * np.ones_like(eps_np)).get_output(0)
# var_trt = ctx.network.add_reduce(var_trt, trt.ReduceOperation.AVG, reduce_axes, keep_dims).get_output(0)
# # compute sqrt(var + eps)
# var_trt = ctx.network.add_scale(var_trt, trt.ScaleMode.UNIFORM, eps_np, np.ones_like(eps_np), 0.5 * np.ones_like(eps_np)).get_output(0)
# # compute final result
# norm_input_trt = ctx.network.add_elementwise(delta_trt, var_trt, trt.ElementWiseOperation.DIV).get_output(0)
# layer = ctx.network.add_shuffle(norm_input_trt)
# layer.set_input(1, input_shape_trt)
# norm_input_trt = layer.get_output(0)
# layer = ctx.network.add_scale(norm_input_trt, trt.ScaleMode.CHANNEL, bias, weight, np.ones_like(bias))
# output._trt = layer.get_output(0)
| 44.783133 | 157 | 0.656712 |
ace3ebcd5f4dcb356cd9224bcb1c666c920845eb | 6,798 | py | Python | Moon_classification_Exercise/source/train.py | NwekeChidi/Udacity_ML_with_SageMaker | fb707639cf622f8f3b104eecddc52aa09fea709b | [
"MIT"
] | null | null | null | Moon_classification_Exercise/source/train.py | NwekeChidi/Udacity_ML_with_SageMaker | fb707639cf622f8f3b104eecddc52aa09fea709b | [
"MIT"
] | null | null | null | Moon_classification_Exercise/source/train.py | NwekeChidi/Udacity_ML_with_SageMaker | fb707639cf622f8f3b104eecddc52aa09fea709b | [
"MIT"
] | null | null | null | from __future__ import print_function # future proof
import argparse
import sys
import os
import json
import pandas as pd
# pytorch
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
# import model
from model import SimpleNet
def model_fn(model_dir):
    """Reconstruct the trained SimpleNet from ``model_dir`` (SageMaker hook)."""
    print("Loading model.")
    # Hyperparameters written by save_model_params() at training time.
    with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as fh:
        model_info = torch.load(fh)
    print("model_info: {}".format(model_info))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = SimpleNet(model_info['input_dim'],
                    model_info['hidden_dim'],
                    model_info['output_dim'])
    # Restore the learned weights saved by save_model().
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as fh:
        net.load_state_dict(torch.load(fh))
    return net.to(device)
# Load the training data from a csv file
def _get_train_loader(batch_size, data_dir):
    """Build a DataLoader over <data_dir>/train.csv (label in column 0)."""
    print("Get data loader.")
    frame = pd.read_csv(os.path.join(data_dir, "train.csv"), header=None, names=None)
    # Column 0 holds the labels; every remaining column is a feature.
    labels = torch.from_numpy(frame[[0]].values).float().squeeze()
    features = torch.from_numpy(frame.drop([0], axis=1).values).float()
    dataset = torch.utils.data.TensorDataset(features, labels)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size)
# Provided train function
def train(model, train_loader, epochs, optimizer, criterion, device):
    """Optimize ``model`` for ``epochs`` passes over ``train_loader``.

    model - the PyTorch model to train.
    train_loader - DataLoader yielding (features, labels) batches.
    epochs - total number of training epochs.
    optimizer - optimizer updating the model parameters.
    criterion - loss function.
    device - where the model and data tensors should live (gpu or cpu).

    Relies on the module-level ``args`` and ``save_model``: the final weights
    are written to ``args.model_dir`` after the last epoch.
    """
    for epoch_num in range(1, epochs + 1):
        model.train()
        running_loss = 0
        for features, labels in train_loader:
            features = features.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            predictions = model(features)
            batch_loss = criterion(predictions, labels)
            batch_loss.backward()
            optimizer.step()
            running_loss += batch_loss.item()
        print("Epoch: {}, Loss: {}".format(epoch_num, running_loss / len(train_loader)))
    # Persist the trained weights once, after the final epoch.
    save_model(model, args.model_dir)
# Provided model saving functions
def save_model(model, model_dir):
    """Write the model's state_dict to <model_dir>/model.pth.

    Note that ``model.cpu()`` moves the model to the CPU in place, so the
    saved tensors are device-independent.
    """
    print("Saving the model.")
    destination = os.path.join(model_dir, 'model.pth')
    torch.save(model.cpu().state_dict(), destination)
def save_model_params(model, model_dir):
    """Save the hyperparameters needed to reconstruct the model.

    Writes a dict with SimpleNet's input/hidden/output dimensions to
    <model_dir>/model_info.pth so that model_fn() can rebuild the network
    before loading its weights.

    model - the model being trained (unused; kept for signature parity with
        save_model).
    model_dir - directory in which to write model_info.pth.

    Note: the dimension values come from the module-level ``args`` namespace.
    """
    # Bug fix: honor the model_dir parameter instead of always reading
    # args.model_dir.  The two coincide for the call in __main__, but the
    # parameter was silently ignored before, breaking any caller that passed
    # a different directory.
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    model_info = {
        'input_dim': args.input_dim,
        'hidden_dim': args.hidden_dim,
        'output_dim': args.output_dim
    }
    with open(model_info_path, 'wb') as f:
        torch.save(model_info, f)
## TODO: Complete the main code
if __name__ == '__main__':
    # All of the model parameters and training parameters are sent as arguments
    # when this script is executed, during a training job.
    # Here we set up an argument parser to easily access the parameters.
    parser = argparse.ArgumentParser()
    # SageMaker parameters, like the directories for training data and saving models; set automatically.
    # Do not need to change.
    parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
    # Training Parameters, given
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    ## TODO: Add args for the three model parameters: input_dim, hidden_dim, output_dim
    # Model parameters (SimpleNet constructor arguments)
    parser.add_argument('--input_dim', type=int, default=2, metavar='N',
                        help='input dimension for training (default: 2)')
    parser.add_argument('--hidden_dim', type=int, default=20, metavar='N',
                        help='hidden dimension for training (default: 20)')
    parser.add_argument('--output_dim', type=int, default=1, metavar='N',
                        help='number of classes (default: 1)')
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # set the seed for generating random numbers (reproducibility)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    # get train loader
    train_loader = _get_train_loader(args.batch_size, args.data_dir) # data_dir from above
    ## TODO: Build the model by passing in the input params
    # To get params from the parser, call args.argument_name, ex. args.epochs or args.hidden_dim
    # Don't forget to move your model .to(device) to move to GPU, if appropriate
    model = SimpleNet(args.input_dim, args.hidden_dim, args.output_dim).to(device)
    # Given: save the parameters used to construct the model
    save_model_params(model, args.model_dir)
    ## TODO: Define an optimizer and loss function for training
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.BCELoss()
    # Trains the model (given line of code, which calls the above training function)
    # This function *also* saves the model state dictionary
    train(model, train_loader, args.epochs, optimizer, criterion, device)
| 37.558011 | 103 | 0.659753 |
ace3ec2af742ee880591278bd25e1cf64be25c6b | 2,114 | py | Python | apps/AuthApp/module/email.py | creativeweb-aj/BackEnd | 03c7e396b83fb29c4dab50d76e57ddc537bbe8d0 | [
"Apache-2.0"
] | null | null | null | apps/AuthApp/module/email.py | creativeweb-aj/BackEnd | 03c7e396b83fb29c4dab50d76e57ddc537bbe8d0 | [
"Apache-2.0"
] | null | null | null | apps/AuthApp/module/email.py | creativeweb-aj/BackEnd | 03c7e396b83fb29c4dab50d76e57ddc537bbe8d0 | [
"Apache-2.0"
] | null | null | null | import datetime
from apps.AuthApp.models import EmailHandler
from settings.extension import db
from flask import render_template_string
import uuid
import smtplib
import os
# Base URL used to build verification links; override via the API_BASE_URL
# environment variable.
API_BASE_URL = os.environ.get('API_BASE_URL', 'http://localhost:5000')
class EmailService:
    """Creates, sends and updates verification e-mails backed by EmailHandler rows."""

    def saveEmail(self, user):
        """Persist a new EmailHandler row with a fresh verification link and send it.

        Returns the saved EmailHandler instance.
        """
        subject = "Creative Web Verify Your Email"
        key = str(uuid.uuid4())
        url = API_BASE_URL + "/email/verify?id=" + key
        body = render_template_string("Hello, This is you verification link {{ link }}", link=url)
        # Timestamps are stored as stringified unix timestamps.
        dateTime = str(datetime.datetime.timestamp(datetime.datetime.now()))
        obj = EmailHandler(user_id=user.id, subject=subject, body=body, uuid=key,
                           created_on=dateTime, updated_on=dateTime)
        db.session.add(obj)
        db.session.commit()
        self.sendEmail(obj, user)
        return obj

    def sendEmail(self, data, user):
        """Send ``data`` (an EmailHandler row) to ``user.email`` via Gmail SMTP."""
        subject = data.subject
        body = data.body
        email = user.email
        emailId = os.environ.get('MAIL_USERNAME')
        password = os.environ.get('MAIL_PASSWORD')
        # NOTE(review): RFC 5322 expects a blank line between the headers and
        # the body -- verify this template renders as intended.
        message = """From: From Person <%s>
To: To Person <%s>
Subject: %s
%s
""" % (emailId, email, subject, body)
        # creates SMTP session
        s = smtplib.SMTP('smtp.gmail.com', 587)
        # start TLS for security
        s.starttls()
        # Authentication
        s.login(emailId, password)
        s.set_debuglevel(True)
        try:
            # sending the mail
            s.sendmail(emailId, [email], message)
        except smtplib.SMTPException as exc:
            # Bug fix: print the caught exception instance; the original
            # printed the smtplib.SMTPException *class* object, losing the
            # actual error details.
            print(exc)
        # terminating the session
        s.quit()
        # NOTE(review): the row is marked as sent even if sendmail failed
        # above -- confirm whether that is intended.
        self.updateEmail(data)
        return data

    def updateEmail(self, data):
        """Mark the EmailHandler row as sent and stamp sent/updated times."""
        id = data.id
        dateTime = str(datetime.datetime.timestamp(datetime.datetime.now()))
        emailObj = EmailHandler.query.filter_by(id=id, is_sent=False).first()
        emailObj.is_sent = True
        emailObj.sent_on = dateTime
        emailObj.updated_on = dateTime
        db.session.commit()
        return emailObj
| 31.552239 | 98 | 0.617786 |
ace3edb617752601cce33ce7f555d844c765d89b | 6,978 | py | Python | allennlp/data/tokenizers/word_splitter.py | danyaljj/allennlp | 5846855d363df8f84d0a12bc6867ac66f539c2a3 | [
"Apache-2.0"
] | null | null | null | allennlp/data/tokenizers/word_splitter.py | danyaljj/allennlp | 5846855d363df8f84d0a12bc6867ac66f539c2a3 | [
"Apache-2.0"
] | null | null | null | allennlp/data/tokenizers/word_splitter.py | danyaljj/allennlp | 5846855d363df8f84d0a12bc6867ac66f539c2a3 | [
"Apache-2.0"
] | null | null | null | import re
from typing import List
from overrides import overrides
import spacy
from allennlp.common import Registrable
from allennlp.common.util import get_spacy_model
from allennlp.data.tokenizers.token import Token
class WordSplitter(Registrable):
    """
    Base class for objects that split a string into words.

    In NLP this operation is usually called "tokenization", but in this
    library the ``Tokenizer`` abstraction is the higher-level object (whose
    tokens could be mere characters), so string-to-word splitting gets its
    own name here.
    """
    default_implementation = 'spacy'

    def batch_split_words(self, sentences: List[str]) -> List[List[Token]]:
        """
        Split many sentences at once.  The default simply applies
        ``split_words`` to each sentence; subclasses (notably
        ``SpacyWordSplitter``) override this to do genuinely batched
        processing, which can be much faster.
        """
        return list(map(self.split_words, sentences))

    def split_words(self, sentence: str) -> List[Token]:
        """
        Split ``sentence`` into a list of :class:`Token` objects.
        """
        raise NotImplementedError
@WordSplitter.register('simple')
class SimpleWordSplitter(WordSplitter):
    """
    Does really simple tokenization.  NLTK was too slow, so we wrote our own simple tokenizer
    instead.  This just does an initial split(), followed by some heuristic filtering of each
    whitespace-delimited token, separating contractions and punctuation.  We assume lower-cased,
    reasonably well-formed English sentences as input.
    """
    def __init__(self):
        # These are certainly incomplete. But at least it's a start.
        self.special_cases = set(['mr.', 'mrs.', 'etc.', 'e.g.', 'cf.', 'c.f.', 'eg.', 'al.'])
        self.contractions = set(["n't", "'s", "'ve", "'re", "'ll", "'d", "'m"])
        # Also recognize the same contractions spelled with a unicode right
        # single quotation mark.
        self.contractions |= set([x.replace("'", "’") for x in self.contractions])
        self.ending_punctuation = set(['"', "'", '.', ',', ';', ')', ']', '}', ':', '!', '?', '%', '”', "’"])
        self.beginning_punctuation = set(['"', "'", '(', '[', '{', '#', '$', '“', "‘"])

    @overrides
    def split_words(self, sentence: str) -> List[Token]:
        """
        Splits a sentence into word tokens.  We handle four kinds of things: words with punctuation
        that should be ignored as a special case (Mr. Mrs., etc.), contractions/genitives (isn't,
        don't, Matt's), and beginning and ending punctuation ("antennagate", (parentheticals), and
        such.).

        The basic outline is to split on whitespace, then check each of these cases.  First, we
        strip off beginning punctuation, then strip off ending punctuation, then strip off
        contractions.  When we strip something off the beginning of a word, we can add it to the
        list of tokens immediately.  When we strip it off the end, we have to save it to be added
        to after the word itself has been added.  Before stripping off any part of a token, we
        first check to be sure the token isn't in our list of special cases.
        """
        fields = sentence.split()
        tokens: List[Token] = []
        for field in fields:
            # Tokens stripped from the *end* of the word must be emitted
            # after the word itself, so they are buffered here.
            add_at_end: List[Token] = []
            while self._can_split(field) and field[0] in self.beginning_punctuation:
                tokens.append(Token(field[0]))
                field = field[1:]
            while self._can_split(field) and field[-1] in self.ending_punctuation:
                add_at_end.insert(0, Token(field[-1]))
                field = field[:-1]
            # There could (rarely) be several contractions in a word, but we check contractions
            # sequentially, in a random order.  If we've removed one, we need to check again to be
            # sure there aren't others.
            remove_contractions = True
            while remove_contractions:
                remove_contractions = False
                for contraction in self.contractions:
                    if self._can_split(field) and field.lower().endswith(contraction):
                        add_at_end.insert(0, Token(field[-len(contraction):]))
                        field = field[:-len(contraction)]
                        remove_contractions = True
            if field:
                tokens.append(Token(field))
            tokens.extend(add_at_end)
        return tokens

    def _can_split(self, token: str):
        # NOTE: returns a truthy/falsy value rather than a strict bool -- an
        # empty string short-circuits the ``and``.
        return token and token.lower() not in self.special_cases
@WordSplitter.register('letters_digits')
class LettersDigitsWordSplitter(WordSplitter):
    """
    Groups characters by class: maximal runs of (unicode) letters and maximal
    runs of digits stay together, while every other non-whitespace character
    becomes its own single-character token.
    """
    @overrides
    def split_words(self, sentence: str) -> List[Token]:
        # ``[^\W\d_]+`` is the usual trick for "one or more unicode letters";
        # ``\d+`` grabs digit runs and ``\S`` any leftover non-space char.
        return [Token(match.group(), idx=match.start())
                for match in re.finditer(r'[^\W\d_]+|\d+|\S', sentence)]
@WordSplitter.register('just_spaces')
class JustSpacesWordSplitter(WordSplitter):
    """
    Assumes tokenization already happened elsewhere and the tokens are
    separated by whitespace: the input is simply ``split()`` and each piece
    wrapped in a :class:`Token`.  The amount of whitespace between tokens is
    irrelevant, and whitespace itself never becomes a token.  (The slightly
    odd name avoids confusion with the much more common
    ``SpacyWordSplitter``.)
    """
    @overrides
    def split_words(self, sentence: str) -> List[Token]:
        return list(map(Token, sentence.split()))
def _remove_spaces(tokens: List[spacy.tokens.Token]) -> List[spacy.tokens.Token]:
    # Drop the whitespace-only tokens that spacy emits.
    return [tok for tok in tokens if not tok.is_space]
@WordSplitter.register('spacy')
class SpacyWordSplitter(WordSplitter):
    """
    A ``WordSplitter`` that uses spaCy's tokenizer.  It's fast and reasonable - this is the
    recommended ``WordSplitter``.
    """
    def __init__(self,
                 language: str = 'en_core_web_sm',
                 pos_tags: bool = False,
                 parse: bool = False,
                 ner: bool = False) -> None:
        # Shared, cached spacy model (see get_spacy_model); pipeline
        # components beyond the tokenizer are enabled only when requested.
        self.spacy = get_spacy_model(language, pos_tags, parse, ner)

    @overrides
    def batch_split_words(self, sentences: List[str]) -> List[List[Token]]:
        # Batched pipe() is much faster than per-sentence calls.
        # NOTE(review): ``n_threads`` is deprecated in newer spacy releases --
        # confirm against the pinned spacy version.
        return [_remove_spaces(tokens)
                for tokens in self.spacy.pipe(sentences, n_threads=-1)]

    @overrides
    def split_words(self, sentence: str) -> List[Token]:
        # This works because our Token class matches spacy's.
        split = self.spacy(sentence)
        return _remove_spaces(split)
| 45.019355 | 109 | 0.640011 |
ace3ee8d6358cc185491b729caac48d04097bb62 | 3,068 | py | Python | czech_holidays.py | honzajavorek/czech-holidays | 220e520dc9103230a827aea5554873b4154717c9 | [
"MIT"
] | 5 | 2015-11-02T00:40:43.000Z | 2019-03-05T07:27:05.000Z | czech_holidays.py | honzajavorek/czech-holidays | 220e520dc9103230a827aea5554873b4154717c9 | [
"MIT"
] | 9 | 2015-06-27T22:37:43.000Z | 2021-06-25T15:18:37.000Z | czech_holidays.py | honzajavorek/czech-holidays | 220e520dc9103230a827aea5554873b4154717c9 | [
"MIT"
] | 3 | 2015-06-27T20:34:05.000Z | 2016-06-07T15:52:29.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date, timedelta
from dateutil.easter import easter as calculate_easter
__title__ = 'czech-holidays'
__version__ = '0.1.3'
__author__ = 'Honza Javorek'
__license__ = 'MIT'
__copyright__ = 'Copyright 2013 Honza Javorek'
__all__ = ('Holiday', 'Holidays', 'holidays')
class Holiday(date):
    """A calendar date carrying the holiday's Czech and English names."""

    def __new__(cls, year, month, day, name, name_en):
        # date is immutable, so the extra attributes are attached in __new__
        # rather than in __init__.
        instance = super(Holiday, cls).__new__(cls, year, month, day)
        instance.name = name
        instance.name_en = name_en
        return instance
class Holidays(list):
    """Czech public holidays for a single year, as :class:`Holiday` entries.

    Contains the fixed-date holidays plus the two moveable feasts (Easter
    Monday and Good Friday) derived from the Easter date.  Two convenience
    attributes are exposed: ``easter`` (Easter Monday) and ``christmas``
    (Christmas Eve).
    """

    def __init__(self, year=None):
        year = year or date.today().year
        # calculate_easter() returns Easter Sunday; +1 day is Easter Monday.
        easter = calculate_easter(year) + timedelta(1)
        easter_friday = easter - timedelta(3)
        self.extend((
            Holiday(
                year, 1, 1,
                "Nový rok",
                "New Year's Day",
            ),
            Holiday(
                year, 1, 1,
                "Den obnovy samostatného českého státu",
                "Restoration Day of the Independent Czech State",
            ),
            Holiday(
                easter.year, easter.month, easter.day,
                "Velikonoční pondělí",
                "Easter Monday",
            ),
            Holiday(
                easter_friday.year, easter_friday.month, easter_friday.day,
                "Velký pátek",
                "Good Friday",
            ),
            Holiday(
                year, 5, 1,
                "Svátek práce",
                "Labour Day",
            ),
            Holiday(
                year, 5, 8,
                "Den vítězství",
                "Liberation Day",
            ),
            Holiday(
                year, 7, 5,
                "Den slovanských věrozvěstů Cyrila a Metoděje",
                "Saints Cyril and Methodius Day",
            ),
            Holiday(
                year, 7, 6,
                "Den upálení mistra Jana Husa",
                "Jan Hus Day",
            ),
            Holiday(
                year, 9, 28,
                "Den české státnosti",
                "St. Wenceslas Day (Czech Statehood Day)",
            ),
            Holiday(
                year, 10, 28,
                "Den vzniku samostatného československého státu",
                "Independent Czechoslovak State Day",
            ),
            Holiday(
                year, 11, 17,
                "Den boje za svobodu a demokracii",
                "Struggle for Freedom and Democracy Day",
            ),
            Holiday(
                year, 12, 24,
                "Štědrý den",
                "Christmas Eve",
            ),
            Holiday(
                year, 12, 25,
                "1. svátek vánoční",
                "Christmas Day",
            ),
            Holiday(
                year, 12, 26,
                "2. svátek vánoční",
                "St. Stephen's Day (The Second Christmas Day)",
            ),
        ))
        self.easter = self[2]  # Easter Monday (third entry above)
        # Bug fix: the old hard-coded ``self[10]`` predates the insertion of
        # the Good Friday entry into the list, so it actually pointed at
        # Nov 17 (Struggle for Freedom and Democracy Day) rather than
        # Christmas.  Look up Christmas Eve by its date instead.
        self.christmas = next(h for h in self if h.month == 12 and h.day == 24)
# Module-level convenience instance, built eagerly at import time with the
# holidays of the current year.
holidays = Holidays()
| 26.912281 | 75 | 0.454042 |
ace3ef96e45797a3d1253fc2cdceb4d91ef9718d | 6,858 | py | Python | nilmtk/dataset_converters/greend/convert_greend.py | camilomarino/nilmtk | a90bd958589b18bbb777b673b505f48628855bcc | [
"Apache-2.0"
] | null | null | null | nilmtk/dataset_converters/greend/convert_greend.py | camilomarino/nilmtk | a90bd958589b18bbb777b673b505f48628855bcc | [
"Apache-2.0"
] | null | null | null | nilmtk/dataset_converters/greend/convert_greend.py | camilomarino/nilmtk | a90bd958589b18bbb777b673b505f48628855bcc | [
"Apache-2.0"
] | null | null | null | from os import listdir, getcwd
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import numpy as np
import datetime
import time
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
import warnings
import numpy as np
from io import StringIO
from multiprocessing import Pool
from nilmtk.utils import get_module_directory
def _get_blocks(filename):
    '''
    Return a list of dataframes from a GREEND CSV file
    GREEND files can be interpreted as multiple CSV blocks concatenated into
    a single file per date. Since the columns of the individual blocks can
    vary in a single file, they need to be read separately.
    There are some issues we need to handle in the converter:
    - the headers from the multiple blocks
    - corrupted data (lines with null chars, broken lines)
    - more fields than specified in header
    '''
    # Text buffer for the CSV block currently being accumulated; None until
    # the first header line is seen.
    block_data = None
    dfs = []
    previous_header = None
    print(filename)
    # Use float64 for timestamps and float32 for the rest of the columns
    dtypes = {}
    dtypes['timestamp'] = np.float64

    def _process_block():
        # Parse the accumulated buffer into a DataFrame and append it to dfs.
        # block_data and dtypes are only *read* here, so no `nonlocal` is
        # needed; the loop below rebinds block_data after each call.
        if block_data is None:
            return
        block_data.seek(0)
        try:
            # ignore extra fields for some files
            error_bad_lines = not (
                ('building5' in filename and 'dataset_2014-02-04.csv' in filename)
            )
            df = pd.read_csv(block_data, index_col='timestamp', dtype=dtypes, error_bad_lines=error_bad_lines)
        except: #(pd.errors.ParserError, ValueError, TypeError):
            # Bare except: print the offending file for debugging, then
            # re-raise so the failure is never silently swallowed.
            print("ERROR", filename)
            raise
        # Timestamps are epoch seconds; convert and localize to CET.
        df.index = pd.to_datetime(df.index, unit='s')
        df = df.tz_localize("UTC").tz_convert("CET").sort_index()
        dfs.append(df)
        block_data.close()

    # Files with known corrupted lines that must be skipped entirely.
    special_check = (
        ('dataset_2014-01-28.csv' in filename and 'building5' in filename) or
        ('dataset_2014-09-02.csv' in filename and 'building6' in filename)
    )
    with open(filename, 'r') as f:
        for line in f:
            # At least one file have a bunch of nulls present, let's clean the data
            line = line.strip('\0')
            if 'time' in line:
                # Found a new block
                if not line.startswith('time'):
                    # Some lines are corrupted, e.g. 1415605814.541311,0.0,NULL,NUtimestamp,000D6F00029C2918...
                    line = line[line.find('time'):]
                if previous_header == line.strip():
                    # Same exact header, we can treat it as the same block
                    # print('Skipping split')
                    continue
                # Using a defaultdict for the dtypes didn't work with read_csv,
                # so we fill a normal dict when we find the columns
                cols = line.strip().split(',')[1:]
                for col in cols:
                    dtypes[col] = np.float32
                # print('Found new block')
                # Flush the previous block before starting a new buffer.
                _process_block()
                block_data = StringIO()
                previous_header = line.strip()
            if special_check:
                if ('0.072.172091508705606' in line or
                        '1409660828.0753369,NULL,NUL' == line):
                    continue
            # Header lines are written into the buffer too, so read_csv sees
            # the column names for the block.
            block_data.write(line)
    # Process the remaining block
    _process_block()
    return (filename, dfs)
def _get_houses(greend_path):
house_list = listdir(greend_path)
return [h for h in house_list if isdir(join(greend_path,h))]
def convert_greend(greend_path, hdf_filename, use_mp=True):
    """
    Parameters
    ----------
    greend_path : str
        The root path of the greend dataset.
    hdf_filename : str
        The destination HDF5 filename (including path and suffix).
    use_mp : bool
        Defaults to True. Use multiprocessing to load the files for
        each building.
    The raw dataset can be downloaded from:
    https://docs.google.com/forms/d/e/1FAIpQLSf3Tbr7IDoSORNFw7dAGD2PB6kSO98RRiVpmKOWOZ52ULAMzA/viewform
    """
    store = pd.HDFStore(hdf_filename, 'w', complevel=5, complib='zlib')
    houses = sorted(_get_houses(greend_path))

    print('Houses found:', houses)
    if use_mp:
        pool = Pool()

    h = 1 # nilmtk counts buildings from 1 not from 0 as we do, so everything is shifted by 1

    for house in houses:
        print('Loading', house)
        abs_house = join(greend_path, house)
        # One CSV file per day, all named dataset_YYYY-MM-DD.csv.
        dates = [d for d in listdir(abs_house) if d.startswith('dataset')]
        target_filenames = [join(abs_house, date) for date in dates]
        if use_mp:
            house_data = pool.map(_get_blocks, target_filenames)

            # Ensure the blocks are sorted by date and make a plain list
            # (_get_blocks returns (filename, [dataframes]); sorting by
            # filename sorts by date because of the naming scheme).
            house_data_dfs = []
            for date, data in sorted(house_data, key=lambda x: x[0]):
                house_data_dfs.extend(data)
        else:
            house_data_dfs = []
            for fn in target_filenames:
                house_data_dfs.extend(_get_blocks(fn)[1])

        overall_df = pd.concat(house_data_dfs, sort=False).sort_index()
        # Duplicate timestamps can appear across blocks; keep the first.
        dups_in_index = overall_df.index.duplicated(keep='first')
        if dups_in_index.any():
            print("Found duplicated values in index, dropping them.")
            overall_df = overall_df[~dups_in_index]

        m = 1
        for column in overall_df.columns:
            print("meter {}: {}".format(m, column))
            key = Key(building=h, meter=m)
            print("Putting into store...")

            df = overall_df[column].to_frame() #.dropna(axis=0)

            # if drop_duplicates:
            #     print("Dropping duplicated values in data...")
            #     df = df.drop_duplicates()

            # nilmtk expects a two-level ('power', 'active') column index.
            df.columns = pd.MultiIndex.from_tuples([('power', 'active')])
            df.columns.set_names(LEVEL_NAMES, inplace=True)

            store.put(str(key), df, format = 'table')
            m += 1
            # print('Flushing store...')
            # store.flush()

        h += 1

    # retrieve the dataset metadata in the metadata subfolder
    metadata_dir = join(get_module_directory(), 'dataset_converters', 'greend', 'metadata')
    convert_yaml_to_hdf5(metadata_dir, hdf_filename)

    # close h5
    store.close()
#is only called when this file is the main file... only test purpose
if __name__ == '__main__':
    # Ad-hoc smoke test: convert a local copy of the dataset and report the
    # elapsed wall-clock time.
    started_at = time.time()
    convert_greend('GREEND_0-2_300615',
                   'GREEND_0-2_300615.h5')
    elapsed = time.time() - started_at
    print()
    print()
    minutes, seconds = int(elapsed / 60), int(elapsed % 60)
    print('Time passed: {}:{}'.format(minutes, seconds))
ace3f054ba952f012aa5ca642e490b1f45f8ba1d | 5,830 | py | Python | tensorflow/python/util/tf_export_test.py | Jibanprakash/tensorflow | a8ae26ae1aa7a33b48cca8bf12c42ab7503a45cf | [
"Apache-2.0"
] | 54 | 2018-05-29T19:52:44.000Z | 2021-11-30T10:41:12.000Z | tensorflow/python/util/tf_export_test.py | caelean/tensorflow | dcb10b1d557168646204239bea6ca5bf1abc40a3 | [
"Apache-2.0"
] | 20 | 2017-12-06T18:20:54.000Z | 2021-11-10T09:54:23.000Z | tensorflow/python/util/tf_export_test.py | caelean/tensorflow | dcb10b1d557168646204239bea6ca5bf1abc40a3 | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf_export tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
def _test_function(unused_arg=0):
  """No-op function used as a tf_export decoration target in the tests below."""
  pass
def _test_function2(unused_arg=0):
  """Second no-op export target, used by the multi-function/override tests."""
  pass
class TestClassA(object):
  """Empty base class used as a tf_export decoration target in the tests."""
  pass
class TestClassB(TestClassA):
  """Subclass of TestClassA, used to check exports do not leak to subclasses."""
  pass
class ValidateExportTest(test.TestCase):
  """Tests for tf_export class."""

  class MockModule(object):
    """Minimal stand-in for a module object registered in sys.modules."""

    def __init__(self, name):
      self.__name__ = name

  def setUp(self):
    self._modules = []

  def tearDown(self):
    # Remove the mock modules registered during the test.
    for name in self._modules:
      del sys.modules[name]
    self._modules = []
    # Strip API-name annotations added by the decorators so each test starts
    # from a clean slate.  BUGFIX: the original listed _test_function twice
    # and never cleaned _test_function2, leaking state between tests.
    for symbol in [_test_function, _test_function2, TestClassA, TestClassB]:
      if hasattr(symbol, '_tf_api_names'):
        del symbol._tf_api_names

  def _CreateMockModule(self, name):
    """Registers a MockModule under `name` in sys.modules and tracks it."""
    mock_module = self.MockModule(name)
    sys.modules[name] = mock_module
    self._modules.append(name)
    return mock_module

  def testExportSingleFunction(self):
    export_decorator = tf_export.tf_export('nameA', 'nameB')
    decorated_function = export_decorator(_test_function)
    self.assertEqual(decorated_function, _test_function)
    self.assertEqual(('nameA', 'nameB'), decorated_function._tf_api_names)

  def testExportMultipleFunctions(self):
    export_decorator1 = tf_export.tf_export('nameA', 'nameB')
    export_decorator2 = tf_export.tf_export('nameC', 'nameD')
    decorated_function1 = export_decorator1(_test_function)
    decorated_function2 = export_decorator2(_test_function2)
    self.assertEqual(decorated_function1, _test_function)
    self.assertEqual(decorated_function2, _test_function2)
    self.assertEqual(('nameA', 'nameB'), decorated_function1._tf_api_names)
    self.assertEqual(('nameC', 'nameD'), decorated_function2._tf_api_names)

  def testExportClasses(self):
    export_decorator_a = tf_export.tf_export('TestClassA1')
    export_decorator_a(TestClassA)
    self.assertEqual(('TestClassA1',), TestClassA._tf_api_names)
    # The export must live on TestClassA only, not be inherited by its
    # subclass's own __dict__.
    self.assertNotIn('_tf_api_names', TestClassB.__dict__)

    export_decorator_b = tf_export.tf_export('TestClassB1')
    export_decorator_b(TestClassB)
    self.assertEqual(('TestClassA1',), TestClassA._tf_api_names)
    self.assertEqual(('TestClassB1',), TestClassB._tf_api_names)

  def testExportSingleConstant(self):
    module1 = self._CreateMockModule('module1')

    export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
    export_decorator.export_constant('module1', 'test_constant')
    self.assertEqual([(('NAME_A', 'NAME_B'), 'test_constant')],
                     module1._tf_api_constants)

  def testExportMultipleConstants(self):
    module1 = self._CreateMockModule('module1')
    module2 = self._CreateMockModule('module2')

    test_constant1 = 123
    test_constant2 = 'abc'
    test_constant3 = 0.5

    export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
    export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
    export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
    export_decorator1.export_constant('module1', test_constant1)
    export_decorator2.export_constant('module2', test_constant2)
    export_decorator3.export_constant('module2', test_constant3)
    self.assertEqual([(('NAME_A', 'NAME_B'), 123)],
                     module1._tf_api_constants)
    self.assertEqual([(('NAME_C', 'NAME_D'), 'abc'),
                      (('NAME_E', 'NAME_F'), 0.5)],
                     module2._tf_api_constants)

  def testRaisesExceptionIfAlreadyHasAPINames(self):
    _test_function._tf_api_names = ['abc']
    export_decorator = tf_export.tf_export('nameA', 'nameB')
    with self.assertRaises(tf_export.SymbolAlreadyExposedError):
      export_decorator(_test_function)

  def testEAllowMultipleExports(self):
    _test_function._tf_api_names = ['name1', 'name2']
    tf_export.tf_export('nameRed', 'nameBlue', allow_multiple_exports=True)(
        _test_function)
    self.assertEqual(['name1', 'name2', 'nameRed', 'nameBlue'],
                     _test_function._tf_api_names)

  def testOverridesFunction(self):
    _test_function2._tf_api_names = ['abc']

    export_decorator = tf_export.tf_export(
        'nameA', 'nameB', overrides=[_test_function2])
    export_decorator(_test_function)

    # _test_function overrides _test_function2. So, _tf_api_names
    # should be removed from _test_function2.
    self.assertFalse(hasattr(_test_function2, '_tf_api_names'))

  def testMultipleDecorators(self):
    def get_wrapper(func):
      def wrapper(*unused_args, **unused_kwargs):
        pass
      return tf_decorator.make_decorator(func, wrapper)
    decorated_function = get_wrapper(_test_function)

    export_decorator = tf_export.tf_export('nameA', 'nameB')
    exported_function = export_decorator(decorated_function)
    self.assertEqual(decorated_function, exported_function)
    self.assertEqual(('nameA', 'nameB'), _test_function._tf_api_names)
# Run the tests with TensorFlow's test runner.
if __name__ == '__main__':
  test.main()
| 35.54878 | 80 | 0.726072 |
ace3f0eb3262fb8350ae2cfca2afec63685f87ab | 7,795 | py | Python | Contents/Libraries/Shared/guessit/rules/properties/release_group.py | jippo015/Sub-Zero.bundle | 734e0f7128c05c0f639e11e7dfc77daa1014064b | [
"MIT"
] | 1,553 | 2015-11-09T02:17:06.000Z | 2022-03-31T20:24:52.000Z | Contents/Libraries/Shared/guessit/rules/properties/release_group.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | [
"MIT"
] | 691 | 2015-11-05T21:32:26.000Z | 2022-03-17T10:52:45.000Z | Contents/Libraries/Shared/guessit/rules/properties/release_group.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | [
"MIT"
] | 162 | 2015-11-06T19:38:55.000Z | 2022-03-16T02:42:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
release_group property
"""
import copy
from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch
from ..common import seps
from ..common.expected import build_expected_function
from ..common.comparators import marker_sorted
from ..common.formatters import cleanup
from ..common.validators import int_coercable, seps_surround
from ..properties.title import TitleFromPosition
def release_group():
    """
    Build the Rebulk object handling the ``release_group`` property.

    :return: Created Rebulk object
    :rtype: Rebulk
    """
    match_expected = build_expected_function('expected_group')

    builder = Rebulk()
    builder.functional(match_expected,
                       name='release_group',
                       tags=['expected'],
                       validator=seps_surround,
                       conflict_solver=lambda match, other: other,
                       disabled=lambda context: not context.get('expected_group'))
    return builder.rules(SceneReleaseGroup, AnimeReleaseGroup)
# Words that must never survive at the edges of a release-group name.
forbidden_groupnames = ['rip', 'by', 'for', 'par', 'pour', 'bonus']

# Brackets are treated specially by clean_groupname: they are kept when they
# wrap the whole value; every other separator is always stripped.
groupname_ignore_seps = '[]{}()'
groupname_seps = ''.join([c for c in seps if c not in groupname_ignore_seps])
def clean_groupname(string):
    """
    Strip separators and forbidden words from a release-group candidate.

    Surrounding brackets are kept only when they wrap the whole value;
    otherwise separators are stripped from both ends.  Forbidden words such
    as 'rip' or 'by', when followed/preceded by a separator, are removed
    from the start and end of the value.

    :param string: raw release-group candidate
    :type string: str
    :return: cleaned release-group value
    :rtype: str
    """
    string = string.strip(groupname_seps)
    # Strip surrounding brackets only when the value is not fully wrapped in
    # them and contains no bracket characters inside.
    if not (string.endswith(tuple(groupname_ignore_seps)) and string.startswith(tuple(groupname_ignore_seps))) \
            and not any(i in string.strip(groupname_ignore_seps) for i in groupname_ignore_seps):
        string = string.strip(groupname_ignore_seps)
    for forbidden in forbidden_groupnames:
        if string.lower().startswith(forbidden) and string[len(forbidden):len(forbidden)+1] in seps:
            string = string[len(forbidden):]
            string = string.strip(groupname_seps)
        if string.lower().endswith(forbidden) and string[-len(forbidden)-1:-len(forbidden)] in seps:
            # BUGFIX: remove the forbidden *suffix*.  The original slice
            # string[:len(forbidden)] kept only the first len(forbidden)
            # characters of the value instead of dropping the suffix.
            string = string[:-len(forbidden)]
            string = string.strip(groupname_seps)
    return string
# Property names that, when found immediately before a candidate hole, mark
# the hole as a plausible scene release group (see SceneReleaseGroup.when).
_scene_previous_names = ['video_codec', 'format', 'video_api', 'audio_codec', 'audio_profile', 'video_profile',
                         'audio_channels', 'screen_size', 'other', 'container', 'language', 'subtitle_language',
                         'subtitle_language.suffix', 'subtitle_language.prefix', 'language.suffix']

# Tags that play the same role as the names above.
_scene_previous_tags = ['release-group-prefix']
class SceneReleaseGroup(Rule):
    """
    Add release_group match in existing matches (scene format).
    Something.XViD-ReleaseGroup.mkv
    """
    dependency = [TitleFromPosition]
    consequence = AppendMatch

    properties = {'release_group': [None]}

    def when(self, matches, context):
        # If a release_group is found before, ignore this kind of release_group rule.

        ret = []

        for filepart in marker_sorted(matches.markers.named('path'), matches):
            # pylint:disable=cell-var-from-loop
            start, end = filepart.span

            titles = matches.named('title', predicate=lambda m: m.start >= start and m.end <= end)

            def keep_only_first_title(match):
                """
                Keep only first title from this filepart, as other ones are most likely release group.

                :param match:
                :type match:
                :return:
                :rtype:
                """
                return match in titles[1:]

            # The release group candidate is the *last* unmatched hole of the
            # filepart whose cleaned value is non-empty.
            last_hole = matches.holes(start, end + 1, formatter=clean_groupname,
                                      ignore=keep_only_first_title,
                                      predicate=lambda hole: cleanup(hole.value), index=-1)

            if last_hole:
                def previous_match_filter(match):
                    """
                    Filter to apply to find previous match

                    :param match:
                    :type match:
                    :return:
                    :rtype:
                    """
                    if match.start < filepart.start:
                        return False
                    return not match.private or match.name in _scene_previous_names

                previous_match = matches.previous(last_hole,
                                                  previous_match_filter,
                                                  index=0)
                # Accept the hole only when it directly follows a known
                # scene property (codec, format, ...) with no text between
                # them and its value is not purely numeric.
                if previous_match and (previous_match.name in _scene_previous_names or
                                       any(tag in previous_match.tags for tag in _scene_previous_tags)) and \
                        not matches.input_string[previous_match.end:last_hole.start].strip(seps) \
                        and not int_coercable(last_hole.value.strip(seps)):

                    last_hole.name = 'release_group'
                    last_hole.tags = ['scene']

                    # if hole is inside a group marker with same value, remove [](){} ...
                    group = matches.markers.at_match(last_hole, lambda marker: marker.name == 'group', 0)
                    if group:
                        group.formatter = clean_groupname
                        if group.value == last_hole.value:
                            last_hole.start = group.start + 1
                            last_hole.end = group.end - 1
                            last_hole.tags = ['anime']

                    ignored_matches = matches.range(last_hole.start, last_hole.end, keep_only_first_title)

                    # Extra titles overlapping the group are really part of
                    # the release group, so drop them.
                    for ignored_match in ignored_matches:
                        matches.remove(ignored_match)

                    ret.append(last_hole)

        return ret
class AnimeReleaseGroup(Rule):
    """
    Add release_group match in existing matches (anime format)
    ...[ReleaseGroup] Something.mkv
    """
    dependency = [SceneReleaseGroup, TitleFromPosition]
    consequence = [RemoveMatch, AppendMatch]

    properties = {'release_group': [None]}

    def when(self, matches, context):
        to_remove = []
        to_append = []

        # If a release_group was already found (e.g. by SceneReleaseGroup),
        # ignore this kind of release_group rule.
        # NOTE: a second guard combining missing episode/season with
        # matches.named('release_group') was removed as dead code -- it could
        # never be true once this check has returned.
        if matches.named('release_group'):
            return

        for filepart in marker_sorted(matches.markers.named('path'), matches):
            # Look for a bracket group in this filepart that contains no real
            # match (only weak-language ones), has a non-empty value and is
            # not purely numeric: that is the anime-style release group.
            # pylint:disable=bad-continuation
            empty_group = matches.markers.range(filepart.start,
                                                filepart.end,
                                                lambda marker: (marker.name == 'group'
                                                                and not matches.range(marker.start, marker.end,
                                                                                      lambda m:
                                                                                      'weak-language' not in m.tags)
                                                                and marker.value.strip(seps)
                                                                and not int_coercable(marker.value.strip(seps))), 0)

            if empty_group:
                group = copy.copy(empty_group)
                group.marker = False
                # Exclude the surrounding brackets from the match value.
                group.raw_start += 1
                group.raw_end -= 1
                group.tags = ['anime']
                group.name = 'release_group'
                to_append.append(group)
                # Weak-language matches inside the group were false
                # positives; remove them.
                to_remove.extend(matches.range(empty_group.start, empty_group.end,
                                               lambda m: 'weak-language' in m.tags))
        return to_remove, to_append
| 39.568528 | 116 | 0.555484 |
ace3f1943f5cd3b70de9a3354a2810bf4d3e5332 | 7,166 | py | Python | retrieval/dpr/utils/data_utils.py | xiaowu0162/NeuralKpGen | c985e00f6e463d32bb849c96a74926b924d53080 | [
"MIT"
] | 8 | 2020-09-08T23:33:55.000Z | 2021-09-30T05:34:08.000Z | retrieval/dpr/utils/data_utils.py | xiaowu0162/NeuralKpGen | c985e00f6e463d32bb849c96a74926b924d53080 | [
"MIT"
] | 1 | 2021-07-14T09:40:25.000Z | 2021-07-29T00:08:01.000Z | retrieval/dpr/utils/data_utils.py | xiaowu0162/NeuralKpGen | c985e00f6e463d32bb849c96a74926b924d53080 | [
"MIT"
] | 1 | 2021-10-09T04:16:17.000Z | 2021-10-09T04:16:17.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for general purpose data processing
"""
import json
import logging
import math
import pickle
import random
from typing import List, Iterator, Callable
from torch import Tensor as T
logger = logging.getLogger()
def read_serialized_data_from_files(paths: List[str]) -> List:
    """Load the pickled list stored in each of *paths* and concatenate them."""
    results = []
    for path in paths:
        with open(path, "rb") as reader:
            logger.info('Reading file %s', path)
            results.extend(pickle.load(reader))
            logger.info('Aggregated data size: {}'.format(len(results)))
    logger.info('Total data size: {}'.format(len(results)))
    return results
def read_data_from_json_files(
        paths: List[str],
        upsample_rates: List = None,
        dataset="kp20k",
        keyword="all",
        sep_token='',
) -> List:
    # Builds DPR-style training examples from JSON-lines keyphrase files.
    # Each produced example uses the joined keyphrases as the "question" and
    # the title + abstract as its single positive context.
    results = []
    if upsample_rates is None:
        upsample_rates = [1] * len(paths)

    # NOTE(review): upsample_rates is only validated for length here; the
    # rates themselves are never applied below -- confirm whether upsampling
    # was intended.
    assert len(upsample_rates) == len(paths), 'up-sample rates parameter doesn\'t match input files amount'

    for i, path in enumerate(paths):
        if dataset in ["kp20k", "kptimes"]:
            logger.info("Loading {} dataset".format(dataset))
            with open(path) as reader:
                for row in reader:
                    ex = json.loads(row)
                    if keyword == 'all':
                        # Use both present and absent keyphrases.
                        q = ' ; '.join(ex["present"] + ex["absent"])
                    else:
                        # Skip examples with no keyphrase of the requested kind.
                        if len(ex[keyword]) == 0:
                            continue
                        q = ' ; '.join(ex[keyword])
                    text = ex["title"] + ' {} '.format(sep_token) + ex["abstract"]
                    ctx = {"text": text, "title": None, "answers": [text]}
                    object = {
                        "question": q,
                        "hard_negative_ctxs": [],
                        "negative_ctxs": [],
                        "positive_ctxs": [ctx],
                        "label": "1"
                    }
                    results.append(object)
                    # Log the first few examples for a quick sanity check.
                    if len(results) < 5:
                        logger.info("----------------------")
                        logger.info("Source/q: %s", q)
                        logger.info("Traget/context: %s", ctx["text"])
                        logger.info("----------------------")

    logger.info('Aggregated data size: {}'.format(len(results)))
    return results
class ShardedDataIterator(object):
"""
General purpose data iterator to be used for Pytorch's DDP mode where every node should handle its own part of
the data.
Instead of cutting data shards by their min size, it sets the amount of iterations by the maximum shard size.
It fills the extra sample by just taking first samples in a shard.
It can also optionally enforce identical batch size for all iterations (might be useful for DP mode).
"""
def __init__(
self,
data: list,
shard_id: int = 0,
num_shards: int = 1,
batch_size: int = 1,
shuffle=True,
shuffle_seed: int = 0,
offset: int = 0,
strict_batch_size: bool = False
):
self.data = data
total_size = len(data)
self.shards_num = max(num_shards, 1)
self.shard_id = max(shard_id, 0)
samples_per_shard = math.ceil(total_size / self.shards_num)
self.shard_start_idx = self.shard_id * samples_per_shard
self.shard_end_idx = min(self.shard_start_idx + samples_per_shard, total_size)
if strict_batch_size:
self.max_iterations = math.ceil(samples_per_shard / batch_size)
else:
self.max_iterations = int(samples_per_shard / batch_size)
logger.debug(
'samples_per_shard=%d, shard_start_idx=%d, shard_end_idx=%d, max_iterations=%d',
samples_per_shard,
self.shard_start_idx,
self.shard_end_idx,
self.max_iterations
)
self.iteration = offset # to track in-shard iteration status
self.shuffle = shuffle
self.batch_size = batch_size
self.shuffle_seed = shuffle_seed
self.strict_batch_size = strict_batch_size
def total_data_len(self) -> int:
return len(self.data)
def iterate_data(self, epoch: int = 0) -> Iterator[List]:
if self.shuffle:
# to be able to resume, same shuffling should be used when starting from a failed/stopped iteration
epoch_rnd = random.Random(self.shuffle_seed + epoch)
epoch_rnd.shuffle(self.data)
# if resuming iteration somewhere in the middle of epoch, one needs to adjust max_iterations
max_iterations = self.max_iterations - self.iteration
shard_samples = self.data[self.shard_start_idx:self.shard_end_idx]
for i in range(self.iteration * self.batch_size, len(shard_samples), self.batch_size):
items = shard_samples[i:i + self.batch_size]
if self.strict_batch_size and len(items) < self.batch_size:
logger.debug('Extending batch to max size')
items.extend(shard_samples[0:self.batch_size - len(items)])
self.iteration += 1
yield items
# some shards may done iterating while the others are at the last batch. Just return the first batch
while self.iteration < max_iterations:
logger.debug('Fulfilling non complete shard='.format(self.shard_id))
self.iteration += 1
batch = shard_samples[0:self.batch_size]
yield batch
logger.debug('Finished iterating, iteration={}, shard={}'.format(self.iteration, self.shard_id))
# reset the iteration status
self.iteration = 0
def get_iteration(self) -> int:
return self.iteration
def apply(self, visitor_func: Callable):
for sample in self.data:
visitor_func(sample)
def normalize_question(question: str) -> str:
    """Strip a single trailing question mark, if present.

    BUGFIX: the original indexed question[-1] unconditionally, which raised
    IndexError on an empty string; endswith handles that case safely.
    """
    if question.endswith('?'):
        question = question[:-1]
    return question
class Tensorizer(object):
    """
    Component for all text to model input data conversions and related utility methods
    """

    # Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
    def text_to_tensor(self, text: str, type: str, title: str = None, add_special_tokens: bool = True):
        """Convert text (optionally prefixed by title) into a model input tensor."""
        raise NotImplementedError

    def get_pair_separator_ids(self) -> T:
        """Return the token-id tensor used to separate a text pair."""
        raise NotImplementedError

    def get_pad_id(self) -> int:
        """Return the padding token id."""
        raise NotImplementedError

    def get_attn_mask(self, tokens_tensor: T):
        """Build the attention mask for a tensor of token ids."""
        raise NotImplementedError

    def is_sub_word_id(self, token_id: int):
        """Tell whether token_id is a sub-word continuation token."""
        raise NotImplementedError

    def to_string(self, token_ids, skip_special_tokens=True):
        """Decode token ids back into a string."""
        raise NotImplementedError

    def set_pad_to_max(self, pad: bool):
        """Enable/disable padding sequences to the maximum length."""
        raise NotImplementedError
| 34.618357 | 114 | 0.596009 |
ace3f1fed0ce108f28aab18a3f16b5f2f3f5e476 | 1,491 | py | Python | Mariadb_activate.py | alan1world/yt-db-link-cleaner | bbfc12ebb178b4a82b14b9625be5afd5b59752a4 | [
"MIT"
] | null | null | null | Mariadb_activate.py | alan1world/yt-db-link-cleaner | bbfc12ebb178b4a82b14b9625be5afd5b59752a4 | [
"MIT"
] | null | null | null | Mariadb_activate.py | alan1world/yt-db-link-cleaner | bbfc12ebb178b4a82b14b9625be5afd5b59752a4 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import keyring, getpass
import mysql.connector as mariadb
import mariadbcontrol
def run_mariadb() -> str:
    """Ensure the MariaDB server is running.

    Returns "stop" when this call had to start the server and "leave" when
    it was already running.
    """
    status = mariadbcontrol.mariadb_is_running()
    if "Mariadb is not running" not in status:
        print("Mariadb was already running")
        return "leave"
    mariadbcontrol.mariadb_start()
    print("Mariadb has been started")
    return "stop"
def key_implement(dbase: str = 'link_store', usr: str = 'alan', respond: bool = True) -> str:
    """Return the MySQL password for *usr*, prompting and storing it in the
    system keyring when it is not stored yet."""
    stored = keyring.get_password(dbase, usr)
    if stored is not None:
        if respond:
            print("Password found")
        return stored
    new_password = getpass.getpass(prompt="Mysql password:")
    keyring.set_password(dbase, usr, new_password)
    if respond:
        print("Password set")
    return new_password
def activate_mariadb(check_state: bool = True) -> object:
    """Create a SQLAlchemy engine for the link_store database, optionally
    starting MariaDB first."""
    # Imported lazily so the module loads even without sqlalchemy installed.
    import sqlalchemy

    user = 'alan'
    database = 'link_store'
    password = key_implement(dbase=database, usr=user, respond=False)
    if check_state:
        run_mariadb()
    return sqlalchemy.create_engine(
        f'mysql+mysqlconnector://{user}:{password}@127.0.0.1/{database}'
    )
# from sqlalchemy import create_engine
# engine = create_engine('mysql+mysqlconnector://[user]:[pw]@127.0.0.1/[dbname]')
# engine = sqlalchemy.create_engine(f'mysql+mysqlconnector://{usr}:{psswd}@127.0.0.1/{dbase}', echo=True)
| 31.0625 | 105 | 0.645875 |
ace3f27d9f6fdaa0680812522fb35b479df66475 | 2,338 | py | Python | ote_sdk/ote_sdk/configuration/enums/model_lifecycle.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | 775 | 2019-03-01T02:13:33.000Z | 2020-09-07T22:49:15.000Z | ote_sdk/ote_sdk/configuration/enums/model_lifecycle.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | 229 | 2019-02-28T21:37:08.000Z | 2020-09-07T15:11:49.000Z | ote_sdk/ote_sdk/configuration/enums/model_lifecycle.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | 290 | 2019-02-28T20:32:11.000Z | 2020-09-07T05:51:41.000Z | """ This module contains the ModelLifecycle Enum """
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from enum import Enum, auto
class ModelLifecycle(Enum):
    """
    Stages of the model lifecycle at which a configuration parameter takes
    effect.  Selecting an early stage implies every downstream stage is
    affected as well (e.g. `ModelLifecycle.TRAINING` also impacts inference
    and testing).

    Possible stages:

    ARCHITECTURE - the parameter changes the model topology, so previously
        trained weights cannot be reused directly (e.g. a `model_depth`
        parameter controlling the number of downsampling steps in a UNet).

    TRAINING - the parameter is likely to change the training outcome
        (e.g. `learning_rate`).

    INFERENCE - the parameter changes inference results (e.g. a
        `probability_threshold` for binary classification).

    TESTING - the parameter changes only the evaluation outcome (e.g. a
        `test_metric` parameter selecting the metric used for testing).

    NONE - the parameter is non-functional and must not change any outcome,
        for example `num_workers` controlling dataloader threads.
    """

    NONE = auto()
    ARCHITECTURE = auto()
    TRAINING = auto()
    INFERENCE = auto()
    TESTING = auto()

    def __str__(self):
        """Return the plain member name, e.g. ``'TRAINING'``."""
        return self.name
| 50.826087 | 120 | 0.735672 |
ace3f2a511654c1b02ec33c7ac1a8c27f0747e7c | 1,525 | bzl | Python | bazel/thrift.bzl | dagmom/pixie | 5981974ea99ff20fa710ccc55c116974cf3817ae | [
"Apache-2.0"
] | 1,821 | 2020-04-08T00:45:27.000Z | 2021-09-01T14:56:25.000Z | bazel/thrift.bzl | dagmom/pixie | 5981974ea99ff20fa710ccc55c116974cf3817ae | [
"Apache-2.0"
] | 142 | 2020-04-09T06:23:46.000Z | 2021-08-24T06:02:12.000Z | bazel/thrift.bzl | dagmom/pixie | 5981974ea99ff20fa710ccc55c116974cf3817ae | [
"Apache-2.0"
] | 105 | 2021-09-08T10:26:50.000Z | 2022-03-29T09:13:36.000Z | # Copyright 2018- The Pixie Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
load("@io_bazel_rules_scala//twitter_scrooge:twitter_scrooge.bzl", "twitter_scrooge")
load("@rules_jvm_external//:defs.bzl", "maven_install")
def thrift_deps(scala_version):
    """Set up Scrooge and fetch the Finagle/Thrift Maven artifacts.

    Args:
      scala_version: full Scala version string (e.g. "2.12.14"); only the
        major.minor prefix is used to build the artifact name suffix.
    """
    twitter_scrooge()
    finagle_version = "21.4.0"
    # Scala Maven artifacts are suffixed with major.minor only (e.g. "_2.12").
    scala_minor_version = ".".join(scala_version.split(".")[:2])
    maven_install(
        artifacts = [
            "com.twitter:finagle-thriftmux_%s:%s" % (scala_minor_version, finagle_version),
            "com.twitter:finagle-mux_%s:%s" % (scala_minor_version, finagle_version),
            "com.twitter:finagle-core_%s:%s" % (scala_minor_version, finagle_version),
            "com.twitter:scrooge-core_%s:%s" % (scala_minor_version, finagle_version),
            "com.twitter:finagle-http_%s:%s" % (scala_minor_version, finagle_version),
            "org.apache.thrift:libthrift:0.10.0",
        ],
        repositories = ["https://repo1.maven.org/maven2"],
    )
| 41.216216 | 91 | 0.701639 |
ace3f2f571fe61d7fd30d75f8ae9a1fc93a63a0e | 8,007 | py | Python | tests/bugs/core_4271_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_4271_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_4271_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_4271
# title: Engine crashs in case of re-creation of an erratic package body
# decription:
# tracker_id: CORE-4271
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
set term ^;
create or alter package pkg_sequence
as
begin
procedure initialize(min_value int, max_value int, step int);
function get_current_value returns int;
function next_value returns int;
function is_end returns boolean;
end
^
recreate package body pkg_sequence
as
begin
function get_max returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'MAX_VALUE') as int);
end
function set_max(avalue int) returns int as
begin
rdb$set_context('USER_SESSION', 'MAX_VALUE', avalue);
return avalue;
end
function get_min returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'MIN_VALUE') as int);
end
function set_min(avalue int) returns int as
begin
rdb$set_context('USER_SESSION', 'MIN_VALUE', avalue);
return avalue;
end
function get_step returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'STEP_VALUE') as int);
end
function set_step(avalue int) returns int
as
begin
rdb$set_context('USER_SESSION', 'STEP_VALUE', avalue);
return avalue;
end
function get_current_value returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'CURRENT_VALUE') as int);
end
function set_current_value(avalue int) returns int as
begin
rdb$set_context('USER_SESSION', 'CURRENT_VALUE', avalue);
return avalue;
end
function next_value returns int as
begin
if (not is_end()) then
set_current_value(get_current_value() + get_step());
return get_current_value();
end
function is_end returns boolean as
begin
return get_current_value() > get_max();
end
procedure initialize(min_value int, max_value int, step int)
as
begin
set_min(min_value);
set_max(max_value);
set_step(step);
set_current_value(min_value);
end
end
^
execute block returns ( out int) as
begin
execute procedure pkg_sequence.initialize(10, 140, 5);
out = pkg_sequence.get_current_value();
suspend;
while (not pkg_sequence.is_end()) do
begin
out = pkg_sequence.next_value();
suspend;
end
end
^
recreate package body pkg_sequence
as
begin
function get_max returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'MAX_VALUE') as int);
end
function set_max(avalue int) returns int as
begin
rdb$set_context('USER_SESSION', 'MAX_VALUE', avalue);
return avalue;
end
function get_min returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'MIN_VALUE') as int);
end
function set_min(avalue int) returns int as
begin
rdb$set_context('USER_SESSION', 'MIN_VALUE', avalue);
return avalue;
end
function get_step returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'STEP_VALUE') as int);
end
function set_step(avalue int) returns int as
begin
rdb$set_context('USER_SESSION', 'STEP_VALUE', avalue);
return avalue;
end
function get_current_value returns int as
begin
return cast(rdb$get_context('USER_SESSION', 'CURRENT_VALUE') as int);
end
function set_current_value(avalue int) returns int
as
begin
rdb$set_context('USER_SESSION', 'CURRENT_VALUE', avalue);
return avalue;
end
function next_value returns int as
begin
if (not is_end()) then
set_current_value(get_current_value() + get_step());
return get_current_value();
end
function is_end returns boolean as
begin
return get_current_value() > get_max();
end
procedure initialize(min_value int, max_value int, step int) as
begin
set_min(min_value);
set_max(max_value);
set_step(step);
set_current_value(min_value);
end
end
^
execute block returns (out int) as
begin
execute procedure pkg_sequence.initialize(10, 140, 5);
out = pkg_sequence.get_current_value();
suspend;
while (not pkg_sequence.is_end()) do
begin
out = pkg_sequence.next_value();
suspend;
end
end
^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
OUT 10
OUT 15
OUT 20
OUT 25
OUT 30
OUT 35
OUT 40
OUT 45
OUT 50
OUT 55
OUT 60
OUT 65
OUT 70
OUT 75
OUT 80
OUT 85
OUT 90
OUT 95
OUT 100
OUT 105
OUT 110
OUT 115
OUT 120
OUT 125
OUT 130
OUT 135
OUT 140
OUT 145
OUT 10
OUT 15
OUT 20
OUT 25
OUT 30
OUT 35
OUT 40
OUT 45
OUT 50
OUT 55
OUT 60
OUT 65
OUT 70
OUT 75
OUT 80
OUT 85
OUT 90
OUT 95
OUT 100
OUT 105
OUT 110
OUT 115
OUT 120
OUT 125
OUT 130
OUT 135
OUT 140
OUT 145
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    # Run the ISQL script (create package, recreate its body twice, iterate
    # the packaged sequence) and compare the cleaned output to the listing.
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| 29.116364 | 81 | 0.453853 |
ace3f4cb2c5ee057a4eb47e3e25003a82c739e9c | 1,982 | py | Python | utils/saver.py | mkroutikov/pytorch-deeplab-xception | 0fcdcf262a5be6cedd77c92422da24277f069c58 | [
"MIT"
] | null | null | null | utils/saver.py | mkroutikov/pytorch-deeplab-xception | 0fcdcf262a5be6cedd77c92422da24277f069c58 | [
"MIT"
] | null | null | null | utils/saver.py | mkroutikov/pytorch-deeplab-xception | 0fcdcf262a5be6cedd77c92422da24277f069c58 | [
"MIT"
] | null | null | null | import os
import shutil
import torch
from collections import OrderedDict
import glob
import json
class Saver(object):
    """Persist checkpoints and run configuration.

    Each instance owns a fresh run/<dataset>/<checkname>/experiment_NNNN
    directory, numbered one past the highest existing experiment.
    """

    def __init__(self, args):
        self.args = args
        self.directory = os.path.join('run', args.dataset, args.checkname)
        self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_*')))
        if self.runs:
            next_id = int(self.runs[-1].split('_')[-1]) + 1
        else:
            next_id = 0
        self.experiment_dir = os.path.join(self.directory, 'experiment_%04d' % next_id)
        os.makedirs(self.experiment_dir, exist_ok=True)

    def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):
        """Saves checkpoint to disk"""
        ckpt_path = os.path.join(self.experiment_dir, filename)
        torch.save(state, ckpt_path)
        if not is_best:
            return
        best_pred = state['best_pred']
        with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:
            f.write(str(best_pred))
        if not self.runs:
            # First experiment ever for this dataset/checkname: always promote.
            shutil.copyfile(ckpt_path, os.path.join(self.directory, 'model_best.pth.tar'))
            return
        # Compare against the best score recorded by every earlier run
        # (seeded with 0.0 so max() is defined even if no run saved one).
        previous_miou = [0.0]
        for run in self.runs:
            suffix = run.split('_')[-1]
            best_file = os.path.join(
                self.directory, 'experiment_{}'.format(str(suffix)), 'best_pred.txt')
            if not os.path.exists(best_file):
                continue
            with open(best_file, 'r') as f:
                previous_miou.append(float(f.readline()))
        if best_pred > max(previous_miou):
            shutil.copyfile(ckpt_path, os.path.join(self.directory, 'model_best.pth.tar'))

    def save_experiment_config(self):
        """Dump the run's argument namespace to parameters.json."""
        with open(os.path.join(self.experiment_dir, 'parameters.json'), 'w') as f:
            json.dump(self.args.__dict__, f, indent=4)
| 41.291667 | 109 | 0.57114 |
ace3f525286a6c2c37db065eb46d45cf1f90c1e0 | 208 | py | Python | Algorithms/Implementation/extra-long-factorials.py | ekant1999/HackerRank | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | [
"MIT"
] | 9 | 2017-03-19T16:27:31.000Z | 2022-02-17T11:42:21.000Z | Algorithms/Implementation/extra-long-factorials.py | ekant1999/HackerRank | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | [
"MIT"
] | null | null | null | Algorithms/Implementation/extra-long-factorials.py | ekant1999/HackerRank | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | [
"MIT"
] | 6 | 2019-02-18T11:26:24.000Z | 2022-03-21T14:13:15.000Z | # Python 2
# Note this is not complicated in Python since
# Python automatically handles big integers
import sys
import math
# Read n from stdin and compute n! exactly; Python integers are
# arbitrary precision, so no overflow handling is required.
# NOTE(review): raw_input (and the trailing "print answer") are
# Python 2 syntax — this script will not run under Python 3.
n = int(raw_input().strip())
answer = math.factorial(n)
print answer | 17.333333 | 47 | 0.716346 |
ace3f580fbf1401711e391204bdb203e89644032 | 517 | py | Python | plotly/validators/scattercarpet/marker/colorbar/_tickformat.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scattercarpet/marker/colorbar/_tickformat.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/scattercarpet/marker/colorbar/_tickformat.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for scattercarpet.marker.colorbar.tickformat."""

    def __init__(self, plotly_name='tickformat',
                 parent_name='scattercarpet.marker.colorbar', **kwargs):
        # Pull out our defaults before forwarding whatever remains.
        edit_type = kwargs.pop('edit_type', 'colorbars')
        role = kwargs.pop('role', 'style')
        super(TickformatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 27.210526 | 72 | 0.624758 |
ace3f5efa85c9edb6be42347c3ee815cefc8946a | 323 | py | Python | pysqlsimplecipher/config.py | thesky-cdn/bot-dokkan-battle | cea44eefef279969d0769edc27bbbe203ffa6c77 | [
"MIT"
] | null | null | null | pysqlsimplecipher/config.py | thesky-cdn/bot-dokkan-battle | cea44eefef279969d0769edc27bbbe203ffa6c77 | [
"MIT"
] | null | null | null | pysqlsimplecipher/config.py | thesky-cdn/bot-dokkan-battle | cea44eefef279969d0769edc27bbbe203ffa6c77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Module        : config.py
# Author        : bssthu
# Project       : pysqlsimplecipher
# Creation date : 2016-06-03
# Description   : cipher parameters used by the rest of the package.
#
# NOTE(review): the values below look like SQLCipher v3 defaults
# (PBKDF2 iteration counts, 1024-byte pages) — confirm against the
# database format being decrypted before changing any of them.
salt_mask = 0x3a        # XOR mask applied to the stored salt (presumably; verify at call sites)
key_sz = 32             # encryption key size in bytes
key_iter = 64000        # PBKDF2 iterations for the encryption key
hmac_key_sz = 32        # HMAC key size in bytes
hmac_key_iter = 2       # PBKDF2 iterations for the HMAC key
page_sz = 1024          # database page size in bytes
iv_sz = 16              # per-page IV size in bytes
reserve_sz = 48         # reserved bytes at the end of each page
hmac_sz = 20            # HMAC size in bytes
| 16.15 | 35 | 0.640867 |
ace3f6106060aa35974c6fa3fff1855f9cdc89ac | 11,804 | py | Python | homeassistant/components/vacuum/__init__.py | LeoCal/home-assistant | bc53e9d0c824cea2b174584bdaa683e81f0c2d02 | [
"Apache-2.0"
] | 3 | 2020-05-18T10:18:16.000Z | 2020-12-08T11:27:55.000Z | homeassistant/components/vacuum/__init__.py | LeoCal/home-assistant | bc53e9d0c824cea2b174584bdaa683e81f0c2d02 | [
"Apache-2.0"
] | 1 | 2020-02-20T02:36:40.000Z | 2020-02-21T19:15:48.000Z | homeassistant/components/vacuum/__init__.py | LeoCal/home-assistant | bc53e9d0c824cea2b174584bdaa683e81f0c2d02 | [
"Apache-2.0"
] | 6 | 2020-04-10T06:21:11.000Z | 2021-07-01T08:53:38.000Z | """Support for vacuum cleaner robots (botvacs)."""
from datetime import timedelta
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components import group
from homeassistant.const import ( # noqa: F401 # STATE_PAUSED/IDLE are API
ATTR_BATTERY_LEVEL,
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_PAUSED,
STATE_IDLE,
)
from homeassistant.loader import bind_hass
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa
ENTITY_SERVICE_SCHEMA,
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import ToggleEntity, Entity
from homeassistant.helpers.icon import icon_for_battery_level
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
# Component domain and default polling interval for vacuum entities.
DOMAIN = "vacuum"
SCAN_INTERVAL = timedelta(seconds=20)
# Legacy "all entities" group for this domain.
GROUP_NAME_ALL_VACUUMS = "all vacuum cleaners"
ENTITY_ID_ALL_VACUUMS = group.ENTITY_ID_FORMAT.format("all_vacuum_cleaners")
# Extra state-attribute keys exposed by vacuum entities.
ATTR_BATTERY_ICON = "battery_icon"
ATTR_CLEANED_AREA = "cleaned_area"
ATTR_FAN_SPEED = "fan_speed"
ATTR_FAN_SPEED_LIST = "fan_speed_list"
ATTR_PARAMS = "params"
ATTR_STATUS = "status"
# Service names registered in async_setup below.
SERVICE_CLEAN_SPOT = "clean_spot"
SERVICE_LOCATE = "locate"
SERVICE_RETURN_TO_BASE = "return_to_base"
SERVICE_SEND_COMMAND = "send_command"
SERVICE_SET_FAN_SPEED = "set_fan_speed"
SERVICE_START_PAUSE = "start_pause"
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_STOP = "stop"
# set_fan_speed requires a fan_speed string in the service data.
VACUUM_SET_FAN_SPEED_SERVICE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
    {vol.Required(ATTR_FAN_SPEED): cv.string}
)
# send_command requires a command string; params may be a dict or a list.
VACUUM_SEND_COMMAND_SERVICE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
    {
        vol.Required(ATTR_COMMAND): cv.string,
        vol.Optional(ATTR_PARAMS): vol.Any(dict, cv.ensure_list),
    }
)
# States reported by StateVacuumDevice (STATE_PAUSED / STATE_IDLE are
# re-exported from homeassistant.const in the imports above).
STATE_CLEANING = "cleaning"
STATE_DOCKED = "docked"
STATE_RETURNING = "returning"
STATE_ERROR = "error"
STATES = [STATE_CLEANING, STATE_DOCKED, STATE_RETURNING, STATE_ERROR]
DEFAULT_NAME = "Vacuum cleaner robot"
# Bitmask feature flags for supported_features; powers of two so they
# can be OR-ed together.
SUPPORT_TURN_ON = 1
SUPPORT_TURN_OFF = 2
SUPPORT_PAUSE = 4
SUPPORT_STOP = 8
SUPPORT_RETURN_HOME = 16
SUPPORT_FAN_SPEED = 32
SUPPORT_BATTERY = 64
SUPPORT_STATUS = 128
SUPPORT_SEND_COMMAND = 256
SUPPORT_LOCATE = 512
SUPPORT_CLEAN_SPOT = 1024
SUPPORT_MAP = 2048
SUPPORT_STATE = 4096
SUPPORT_START = 8192
@bind_hass
def is_on(hass, entity_id=None):
    """Return if the vacuum is on based on the statemachine."""
    # Fall back to the domain-wide group entity when no id was given.
    target = entity_id if entity_id else ENTITY_ID_ALL_VACUUMS
    return hass.states.is_state(target, STATE_ON)
async def async_setup(hass, config):
    """Set up the vacuum component and register its services."""
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_VACUUMS
    )
    await component.async_setup(config)

    # Services that need no extra payload beyond entity targeting,
    # registered in the same order as before.
    for service, handler in (
        (SERVICE_TURN_ON, "async_turn_on"),
        (SERVICE_TURN_OFF, "async_turn_off"),
        (SERVICE_TOGGLE, "async_toggle"),
        (SERVICE_START_PAUSE, "async_start_pause"),
        (SERVICE_START, "async_start"),
        (SERVICE_PAUSE, "async_pause"),
        (SERVICE_RETURN_TO_BASE, "async_return_to_base"),
        (SERVICE_CLEAN_SPOT, "async_clean_spot"),
        (SERVICE_LOCATE, "async_locate"),
        (SERVICE_STOP, "async_stop"),
    ):
        component.async_register_entity_service(
            service, ENTITY_SERVICE_SCHEMA, handler
        )

    # Services carrying their own payload schemas.
    component.async_register_entity_service(
        SERVICE_SET_FAN_SPEED,
        VACUUM_SET_FAN_SPEED_SERVICE_SCHEMA,
        "async_set_fan_speed",
    )
    component.async_register_entity_service(
        SERVICE_SEND_COMMAND,
        VACUUM_SEND_COMMAND_SERVICE_SCHEMA,
        "async_send_command",
    )
    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    component = hass.data[DOMAIN]
    return await component.async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    component = hass.data[DOMAIN]
    return await component.async_unload_entry(entry)
class _BaseVacuum(Entity):
    """Representation of a base vacuum.
    Contains common properties and functions for all vacuum devices.
    Each operation has a blocking method for integrations to override
    plus an async_* wrapper that runs it in the executor via
    hass.async_add_executor_job.
    """
    @property
    def supported_features(self):
        """Flag vacuum cleaner features that are supported."""
        raise NotImplementedError()
    @property
    def battery_level(self):
        """Return the battery level of the vacuum cleaner."""
        # None means "not supported"; see state_attributes in subclasses.
        return None
    @property
    def fan_speed(self):
        """Return the fan speed of the vacuum cleaner."""
        return None
    @property
    def fan_speed_list(self):
        """Get the list of available fan speed steps of the vacuum cleaner."""
        raise NotImplementedError()
    def stop(self, **kwargs):
        """Stop the vacuum cleaner."""
        raise NotImplementedError()
    async def async_stop(self, **kwargs):
        """Stop the vacuum cleaner.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.stop, **kwargs))
    def return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock."""
        raise NotImplementedError()
    async def async_return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.return_to_base, **kwargs))
    def clean_spot(self, **kwargs):
        """Perform a spot clean-up."""
        raise NotImplementedError()
    async def async_clean_spot(self, **kwargs):
        """Perform a spot clean-up.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.clean_spot, **kwargs))
    def locate(self, **kwargs):
        """Locate the vacuum cleaner."""
        raise NotImplementedError()
    async def async_locate(self, **kwargs):
        """Locate the vacuum cleaner.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.locate, **kwargs))
    def set_fan_speed(self, fan_speed, **kwargs):
        """Set fan speed."""
        raise NotImplementedError()
    async def async_set_fan_speed(self, fan_speed, **kwargs):
        """Set fan speed.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(
            partial(self.set_fan_speed, fan_speed, **kwargs)
        )
    def send_command(self, command, params=None, **kwargs):
        """Send a command to a vacuum cleaner."""
        raise NotImplementedError()
    async def async_send_command(self, command, params=None, **kwargs):
        """Send a command to a vacuum cleaner.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(
            partial(self.send_command, command, params=params, **kwargs)
        )
class VacuumDevice(_BaseVacuum, ToggleEntity):
    """Representation of a vacuum cleaner robot."""
    @property
    def status(self):
        """Return the status of the vacuum cleaner."""
        return None
    @property
    def battery_icon(self):
        """Return the battery icon for the vacuum cleaner."""
        # Heuristic: treat any status text containing "charg" (e.g.
        # "charging"/"charged") as charging.
        charging = False
        if self.status is not None:
            charging = "charg" in self.status.lower()
        return icon_for_battery_level(
            battery_level=self.battery_level, charging=charging
        )
    @property
    def state_attributes(self):
        """Return the state attributes of the vacuum cleaner."""
        # Only advertise attributes the integration actually supports
        # (signalled by the corresponding property returning non-None).
        data = {}
        if self.status is not None:
            data[ATTR_STATUS] = self.status
        if self.battery_level is not None:
            data[ATTR_BATTERY_LEVEL] = self.battery_level
            data[ATTR_BATTERY_ICON] = self.battery_icon
        if self.fan_speed is not None:
            data[ATTR_FAN_SPEED] = self.fan_speed
            data[ATTR_FAN_SPEED_LIST] = self.fan_speed_list
        return data
    def turn_on(self, **kwargs):
        """Turn the vacuum on and start cleaning."""
        raise NotImplementedError()
    async def async_turn_on(self, **kwargs):
        """Turn the vacuum on and start cleaning.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.turn_on, **kwargs))
    def turn_off(self, **kwargs):
        """Turn the vacuum off stopping the cleaning and returning home."""
        raise NotImplementedError()
    async def async_turn_off(self, **kwargs):
        """Turn the vacuum off stopping the cleaning and returning home.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.turn_off, **kwargs))
    def start_pause(self, **kwargs):
        """Start, pause or resume the cleaning task."""
        raise NotImplementedError()
    async def async_start_pause(self, **kwargs):
        """Start, pause or resume the cleaning task.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.start_pause, **kwargs))
    async def async_pause(self):
        """Not supported."""
        # start/pause semantics live in StateVacuumDevice for this class.
        pass
    async def async_start(self):
        """Not supported."""
        pass
class StateVacuumDevice(_BaseVacuum):
    """Representation of a vacuum cleaner robot that supports states."""
    @property
    def state(self):
        """Return the state of the vacuum cleaner."""
        return None
    @property
    def battery_icon(self):
        """Return the battery icon for the vacuum cleaner."""
        # The device is considered charging exactly when it is docked.
        charging = bool(self.state == STATE_DOCKED)
        return icon_for_battery_level(
            battery_level=self.battery_level, charging=charging
        )
    @property
    def state_attributes(self):
        """Return the state attributes of the vacuum cleaner."""
        # Unlike VacuumDevice there is no free-form "status" attribute;
        # the entity state itself carries that information.
        data = {}
        if self.battery_level is not None:
            data[ATTR_BATTERY_LEVEL] = self.battery_level
            data[ATTR_BATTERY_ICON] = self.battery_icon
        if self.fan_speed is not None:
            data[ATTR_FAN_SPEED] = self.fan_speed
            data[ATTR_FAN_SPEED_LIST] = self.fan_speed_list
        return data
    def start(self):
        """Start or resume the cleaning task."""
        raise NotImplementedError()
    async def async_start(self):
        """Start or resume the cleaning task.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(self.start)
    def pause(self):
        """Pause the cleaning task."""
        raise NotImplementedError()
    async def async_pause(self):
        """Pause the cleaning task.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(self.pause)
    async def async_turn_on(self, **kwargs):
        """Not supported."""
        # State-based vacuums use start/pause/stop instead of on/off.
        pass
    async def async_turn_off(self, **kwargs):
        """Not supported."""
        pass
    async def async_toggle(self, **kwargs):
        """Not supported."""
        pass
| 29.436409 | 86 | 0.682057 |
ace3f6bf7f81a1dc8bd49783c1a76018e94af96b | 1,201 | py | Python | recursive_sorting/recursive_sorting.py | Zooheck/Sorting | 4498b5572c3ed2ddd8b509b4b75cc7a187fcf82f | [
"MIT"
] | 1 | 2019-05-01T03:27:59.000Z | 2019-05-01T03:27:59.000Z | recursive_sorting/recursive_sorting.py | Zooheck/Sorting | 4498b5572c3ed2ddd8b509b4b75cc7a187fcf82f | [
"MIT"
] | null | null | null | recursive_sorting/recursive_sorting.py | Zooheck/Sorting | 4498b5572c3ed2ddd8b509b4b75cc7a187fcf82f | [
"MIT"
] | null | null | null | # TO-DO: complete the helpe function below to merge 2 sorted arrays
def merge(arrA, arrB):
    """Merge two sorted arrays into one sorted array.

    Runs in O(len(arrA) + len(arrB)). Stable: on ties the element from
    arrA is taken first, matching the original comparison order.

    :param arrA: sorted list
    :param arrB: sorted list
    :return: new sorted list containing every element of both inputs
    """
    merged = []
    i = j = 0
    # Two-pointer walk instead of the original pre-sized zero array with
    # per-iteration bound checks.
    while i < len(arrA) and j < len(arrB):
        if arrB[j] < arrA[i]:
            merged.append(arrB[j])
            j += 1
        else:
            merged.append(arrA[i])
            i += 1
    # One input is exhausted; append the remainder of the other.
    merged.extend(arrA[i:])
    merged.extend(arrB[j:])
    return merged
# Quick smoke test; expected output: [0.5, 1, 2, 3, 4, 5, 6]
print(merge([1, 2, 3], [0.5, 4, 5, 6]))
# TO-DO: implement the Merge Sort function below USING RECURSION
# def merge_sort(arr):
#     # TO-DO
#     return arr
# # STRETCH: implement an in-place merge sort algorithm
# def merge_in_place(arr, start, mid, end):
#     # TO-DO
#     return arr
# def merge_sort_in_place(arr, l, r):
#     # TO-DO
#     return arr
# # STRETCH: implement the Timsort function below
# # hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt
# def timsort(arr):
#     return arr
| 23.54902 | 86 | 0.588676 |
ace3f770d9d7dcc45ee7545bf7fca829fa824740 | 8,401 | py | Python | xlit_s2s_nmt/inference.py | vsoch/NETransliteration-COLING2018 | 5d5f59e561ecea45a6d3602121e1049baa7a76c3 | [
"MIT"
] | 52 | 2018-06-12T17:08:17.000Z | 2022-02-20T11:35:30.000Z | xlit_s2s_nmt/inference.py | vsoch/NETransliteration-COLING2018 | 5d5f59e561ecea45a6d3602121e1049baa7a76c3 | [
"MIT"
] | 2 | 2018-08-18T20:00:29.000Z | 2019-10-03T10:12:26.000Z | xlit_s2s_nmt/inference.py | vsoch/NETransliteration-COLING2018 | 5d5f59e561ecea45a6d3602121e1049baa7a76c3 | [
"MIT"
] | 10 | 2018-06-12T18:04:37.000Z | 2021-04-15T17:32:57.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""To perform inference on test set given a trained model."""
from __future__ import print_function
import codecs
import time
import tensorflow as tf
from . import attention_model
from . import gnmt_model
from . import model as nmt_model
from . import model_helper
from .utils import misc_utils as utils
from .utils import nmt_utils
__all__ = ["load_data", "inference",
"single_worker_inference", "multi_worker_inference"]
def _decode_inference_indices(model, sess, output_infer,
                              output_infer_summary_prefix,
                              inference_indices,
                              tgt_eos,
                              subword_option):
  """Decoding only a specific set of sentences.

  Decodes one sentence per entry in inference_indices, writing the
  translations to output_infer and, for attention models, one attention
  image per sentence next to it.
  """
  utils.print_out("  decoding to output %s , num sents %d." %
                  (output_infer, len(inference_indices)))
  start_time = time.time()
  with codecs.getwriter("utf-8")(
      tf.gfile.GFile(output_infer, mode="wb")) as trans_f:
    trans_f.write("")   # Write empty string to ensure file is created.
    for decode_id in inference_indices:
      nmt_outputs, infer_summary = model.decode(sess)
      # get text translation; the infer batch here holds exactly one sentence.
      assert nmt_outputs.shape[0] == 1
      translation = nmt_utils.get_translation(
          nmt_outputs,
          sent_id=0,
          tgt_eos=tgt_eos,
          subword_option=subword_option)
      if infer_summary is not None:  # Attention models
        # Dump the attention summary image for this sentence.
        image_file = output_infer_summary_prefix + str(decode_id) + ".png"
        utils.print_out("  save attention image to %s*" % image_file)
        image_summ = tf.Summary()
        image_summ.ParseFromString(infer_summary)
        with tf.gfile.GFile(image_file, mode="w") as img_f:
          img_f.write(image_summ.value[0].image.encoded_image_string)
      # NOTE(review): translation is bytes (it is concatenated with b"\n"
      # below); "%s" % bytes yields its repr under Python 3 — confirm the
      # intended output format of trans_f.
      trans_f.write("%s\n" % translation)
      utils.print_out(translation + b"\n")
  utils.print_time("  done", start_time)
def load_data(inference_input_file, hparams=None):
  """Load inference data."""
  reader = codecs.getreader("utf-8")
  with reader(tf.gfile.GFile(inference_input_file, mode="rb")) as f:
    lines = f.read().splitlines()
  # Optionally keep only the explicitly requested sentence indices.
  if hparams and hparams.inference_indices:
    lines = [lines[idx] for idx in hparams.inference_indices]
  return lines
def inference(ckpt,
              inference_input_file,
              inference_output_file,
              hparams,
              num_workers=1,
              jobid=0,
              scope=None):
  """Perform translation.

  Builds the inference model matching hparams (plain seq2seq, standard
  attention, or GNMT) and dispatches to the single- or multi-worker
  decoding path. inference_indices is only supported with one worker.
  """
  if hparams.inference_indices:
    assert num_workers == 1
  # Pick the model class from the attention configuration.
  if not hparams.attention:
    model_creator = nmt_model.Model
  elif hparams.attention_architecture == "standard":
    model_creator = attention_model.AttentionModel
  elif hparams.attention_architecture in ["gnmt", "gnmt_v2"]:
    model_creator = gnmt_model.GNMTModel
  else:
    raise ValueError("Unknown model architecture")
  infer_model = model_helper.create_infer_model(model_creator, hparams, scope)
  if num_workers == 1:
    single_worker_inference(
        infer_model,
        ckpt,
        inference_input_file,
        inference_output_file,
        hparams)
  else:
    multi_worker_inference(
        infer_model,
        ckpt,
        inference_input_file,
        inference_output_file,
        hparams,
        num_workers=num_workers,
        jobid=jobid)
def single_worker_inference(infer_model,
                            ckpt,
                            inference_input_file,
                            inference_output_file,
                            hparams):
  """Inference with a single worker.

  Loads the checkpoint, feeds the whole input file through the infer
  iterator and writes translations to inference_output_file.
  """
  output_infer = inference_output_file
  # Read data
  infer_data = load_data(inference_input_file, hparams)
  with tf.Session(
      graph=infer_model.graph, config=utils.get_config_proto()) as sess:
    loaded_infer_model = model_helper.load_model(
        infer_model.model, ckpt, sess, "infer")
    sess.run(
        infer_model.iterator.initializer,
        feed_dict={
            infer_model.src_placeholder: infer_data,
            infer_model.batch_size_placeholder: hparams.infer_batch_size
        })
    # Decode
    utils.print_out("# Start decoding")
    if hparams.inference_indices:
      # Per-index decoding path (also dumps attention images).
      _decode_inference_indices(
          loaded_infer_model,
          sess,
          output_infer=output_infer,
          output_infer_summary_prefix=output_infer,
          inference_indices=hparams.inference_indices,
          tgt_eos=hparams.eos,
          subword_option=hparams.subword_option)
    else:
      utils.print_out("decoding with beam %d and %d trans per" % (hparams.beam_width, hparams.num_translations_per_input))
      nmt_utils.decode_and_evaluate(
          "infer",
          loaded_infer_model,
          sess,
          output_infer,
          ref_file=None,
          metrics=hparams.metrics,
          subword_option=hparams.subword_option,
          beam_width=hparams.beam_width,
          tgt_eos=hparams.eos,
          num_translations_per_input=hparams.num_translations_per_input,
          infer_data=infer_data)
def multi_worker_inference(infer_model,
                           ckpt,
                           inference_input_file,
                           inference_output_file,
                           hparams,
                           num_workers,
                           jobid):
  """Inference using multiple workers.

  Each worker decodes its contiguous slice of the input and writes to
  "<output>_<jobid>", then renames it to "<output>_done_<jobid>" as a
  completion marker. Worker 0 waits for all markers, concatenates them
  into the final output file and removes the per-worker files.
  """
  assert num_workers > 1
  final_output_infer = inference_output_file
  output_infer = "%s_%d" % (inference_output_file, jobid)
  output_infer_done = "%s_done_%d" % (inference_output_file, jobid)
  # Read data
  infer_data = load_data(inference_input_file, hparams)
  # Split data to multiple workers
  total_load = len(infer_data)
  # Ceiling division so every sentence is assigned to some worker.
  load_per_worker = int((total_load - 1) / num_workers) + 1
  start_position = jobid * load_per_worker
  end_position = min(start_position + load_per_worker, total_load)
  infer_data = infer_data[start_position:end_position]
  with tf.Session(
      graph=infer_model.graph, config=utils.get_config_proto()) as sess:
    loaded_infer_model = model_helper.load_model(
        infer_model.model, ckpt, sess, "infer")
    sess.run(infer_model.iterator.initializer,
             {
                 infer_model.src_placeholder: infer_data,
                 infer_model.batch_size_placeholder: hparams.infer_batch_size
             })
    # Decode
    utils.print_out("# Start decoding")
    nmt_utils.decode_and_evaluate(
        "infer",
        loaded_infer_model,
        sess,
        output_infer,
        ref_file=None,
        metrics=hparams.metrics,
        subword_option=hparams.subword_option,
        beam_width=hparams.beam_width,
        tgt_eos=hparams.eos,
        num_translations_per_input=hparams.num_translations_per_input)
    # Change file name to indicate the file writing is completed.
    tf.gfile.Rename(output_infer, output_infer_done, overwrite=True)
    # Job 0 is responsible for the clean up.
    if jobid != 0: return
    # Now write all translations
    with codecs.getwriter("utf-8")(
        tf.gfile.GFile(final_output_infer, mode="wb")) as final_f:
      for worker_id in range(num_workers):
        worker_infer_done = "%s_done_%d" % (inference_output_file, worker_id)
        # Poll every 10s until the worker's rename (above) has happened.
        while not tf.gfile.Exists(worker_infer_done):
          utils.print_out("  waitting job %d to complete." % worker_id)
          time.sleep(10)
        with codecs.getreader("utf-8")(
            tf.gfile.GFile(worker_infer_done, mode="rb")) as f:
          for translation in f:
            final_f.write("%s" % translation)
      # All pieces merged; remove the per-worker marker files.
      for worker_id in range(num_workers):
        worker_infer_done = "%s_done_%d" % (inference_output_file, worker_id)
        tf.gfile.Remove(worker_infer_done)
| 35.004167 | 122 | 0.654684 |
ace3f801bd8ec58268b593c8fb4c0f859202064a | 50,181 | py | Python | manila/db/api.py | snpd25/manila | 9cf435c7f86a7b79e01af7b8bc88cd619e34cab4 | [
"Apache-2.0"
] | null | null | null | manila/db/api.py | snpd25/manila | 9cf435c7f86a7b79e01af7b8bc88cd619e34cab4 | [
"Apache-2.0"
] | null | null | null | manila/db/api.py | snpd25/manila | 9cf435c7f86a7b79e01af7b8bc88cd619e34cab4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the manila.db namespace. Call these
functions from manila.db namespace, not the manila.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/manila/manila.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import api as db_api
# Configuration options controlling which DB backend is loaded and how
# share / share-snapshot display names are generated.
db_opts = [
    cfg.StrOpt('db_backend',
               default='sqlalchemy',
               help='The backend to use for database.'),
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create.'),
    cfg.StrOpt('share_name_template',
               default='share-%s',
               help='Template string to be used to generate share names.'),
    cfg.StrOpt('share_snapshot_name_template',
               default='share-snapshot-%s',
               help='Template string to be used to generate share snapshot '
                    'names.'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
# Maps the 'db_backend' option value to the module that implements the API.
_BACKEND_MAPPING = {'sqlalchemy': 'manila.db.sqlalchemy.api'}
# Lazily-loaded backend implementation; every function below delegates to it.
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
                                lazy=True)
# Authorization helpers, delegated to the backend implementation.
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    return IMPL.authorize_project_context(context, project_id)
def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    return IMPL.authorize_quota_class_context(context, class_name)
###################
# Service table operations.
def service_destroy(context, service_id):
    """Destroy the service or raise if it does not exist."""
    return IMPL.service_destroy(context, service_id)
def service_get(context, service_id):
    """Get a service or raise if it does not exist."""
    return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
    """Get a service by host it's on and topic it listens to."""
    return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
    """Get all services, optionally filtered by the ``disabled`` flag."""
    return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
    """Get all services for a given topic."""
    return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_share_sorted(context):
    """Get all share services sorted by share count.
    :returns: a list of (Service, share_count) tuples.
    """
    return IMPL.service_get_all_share_sorted(context)
def service_get_by_args(context, host, binary):
    """Get the state of a service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)
def service_update(context, service_id, values):
    """Set the given properties on a service and update it.
    Raises NotFound if service does not exist.
    """
    return IMPL.service_update(context, service_id, values)
####################
# Quota limit operations.
def quota_create(context, project_id, resource, limit, user_id=None,
                 share_type_id=None):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit,
                             user_id=user_id, share_type_id=share_type_id)
def quota_get_all_by_project_and_user(context, project_id, user_id):
    """Retrieve all quotas associated with a given project and user."""
    return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id)
def quota_get_all_by_project_and_share_type(context, project_id,
                                            share_type_id):
    """Retrieve all quotas associated with a given project and share type."""
    return IMPL.quota_get_all_by_project_and_share_type(
        context, project_id, share_type_id)
def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)
def quota_get_all(context, project_id):
    """Retrieve all user quotas associated with a given project."""
    return IMPL.quota_get_all(context, project_id)
def quota_update(context, project_id, resource, limit, user_id=None,
                 share_type_id=None):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit,
                             user_id=user_id, share_type_id=share_type_id)
###################
# Quota class operations.
def quota_class_create(context, class_name, resource, limit):
    """Create a quota class for the given name and resource."""
    return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
    """Retrieve a quota class or raise if it does not exist."""
    return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
    """Retrieve all default quotas."""
    return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
    """Retrieve all quotas associated with a given quota class."""
    return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
    """Update a quota class or raise if it does not exist."""
    return IMPL.quota_class_update(context, class_name, resource, limit)
###################
# Quota usage operations.
def quota_usage_get(context, project_id, resource, user_id=None,
                    share_type_id=None):
    """Retrieve a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_get(
        context, project_id, resource, user_id=user_id,
        share_type_id=share_type_id)
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
    """Retrieve all usage associated with a given project and user."""
    return IMPL.quota_usage_get_all_by_project_and_user(context,
                                                        project_id, user_id)
def quota_usage_get_all_by_project_and_share_type(context, project_id,
                                                  share_type_id):
    """Retrieve all usage associated with a given project and share type."""
    return IMPL.quota_usage_get_all_by_project_and_share_type(
        context, project_id, share_type_id)
def quota_usage_get_all_by_project(context, project_id):
    """Retrieve all usage associated with a given project."""
    return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_create(context, project_id, user_id, resource, in_use,
                       reserved=0, until_refresh=None, share_type_id=None):
    """Create a quota usage."""
    return IMPL.quota_usage_create(
        context, project_id, user_id, resource, in_use, reserved,
        until_refresh, share_type_id=share_type_id)
def quota_usage_update(context, project_id, user_id, resource,
                       share_type_id=None, **kwargs):
    """Update a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_update(
        context, project_id, user_id, resource, share_type_id=share_type_id,
        **kwargs)
###################
# Quota reservation operations.
def quota_reserve(context, resources, quotas, user_quotas, share_type_quotas,
                  deltas, expire, until_refresh, max_age,
                  project_id=None, user_id=None, share_type_id=None):
    """Check quotas and create appropriate reservations."""
    return IMPL.quota_reserve(
        context, resources, quotas, user_quotas, share_type_quotas, deltas,
        expire, until_refresh, max_age, project_id=project_id, user_id=user_id,
        share_type_id=share_type_id)
def reservation_commit(context, reservations, project_id=None, user_id=None,
                       share_type_id=None):
    """Commit quota reservations."""
    return IMPL.reservation_commit(
        context, reservations, project_id=project_id, user_id=user_id,
        share_type_id=share_type_id)
def reservation_rollback(context, reservations, project_id=None, user_id=None,
                         share_type_id=None):
    """Roll back quota reservations."""
    return IMPL.reservation_rollback(
        context, reservations, project_id=project_id, user_id=user_id,
        share_type_id=share_type_id)
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
    """Destroy all quotas associated with a given project and user."""
    return IMPL.quota_destroy_all_by_project_and_user(context,
                                                      project_id, user_id)
def quota_destroy_all_by_project_and_share_type(context, project_id,
                                                share_type_id):
    """Destroy all quotas associated with a given project and share type."""
    return IMPL.quota_destroy_all_by_project_and_share_type(
        context, project_id, share_type_id)
def quota_destroy_all_by_project(context, project_id):
    """Destroy all quotas associated with a given project."""
    return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
    """Roll back any expired reservations."""
    return IMPL.reservation_expire(context)
###################
# Share instance operations.
def share_instance_get(context, instance_id, with_share_data=False):
    """Get share instance by id."""
    return IMPL.share_instance_get(context, instance_id,
                                   with_share_data=with_share_data)
def share_instance_create(context, share_id, values):
    """Create new share instance."""
    return IMPL.share_instance_create(context, share_id, values)
def share_instance_delete(context, instance_id, session=None,
                          need_to_update_usages=False):
    """Delete share instance."""
    return IMPL.share_instance_delete(
        context, instance_id, session=session,
        need_to_update_usages=need_to_update_usages)
def share_instance_update(context, instance_id, values, with_share_data=False):
    """Update share instance fields."""
    return IMPL.share_instance_update(context, instance_id, values,
                                      with_share_data=with_share_data)
def share_instances_host_update(context, current_host, new_host):
    """Update the host attr of all share instances that are on current_host."""
    return IMPL.share_instances_host_update(context, current_host, new_host)
def share_instances_get_all(context, filters=None):
    """Returns all share instances."""
    return IMPL.share_instances_get_all(context, filters=filters)
def share_instances_get_all_by_share_server(context, share_server_id):
    """Returns all share instances with given share_server_id."""
    return IMPL.share_instances_get_all_by_share_server(context,
                                                        share_server_id)
def share_instances_get_all_by_host(context, host, with_share_data=False):
    """Returns all share instances with given host."""
    return IMPL.share_instances_get_all_by_host(
        context, host, with_share_data=with_share_data)
def share_instances_get_all_by_share_network(context, share_network_id):
    """Returns list of share instances that belong to given share network."""
    return IMPL.share_instances_get_all_by_share_network(context,
                                                         share_network_id)
def share_instances_get_all_by_share(context, share_id):
    """Returns list of share instances that belong to given share."""
    return IMPL.share_instances_get_all_by_share(context, share_id)
def share_instances_get_all_by_share_group_id(context, share_group_id):
    """Returns list of share instances that belong to given share group."""
    return IMPL.share_instances_get_all_by_share_group_id(
        context, share_group_id)
###################
# Share operations.
def share_create(context, share_values, create_share_instance=True):
    """Create new share."""
    return IMPL.share_create(context, share_values,
                             create_share_instance=create_share_instance)
def share_update(context, share_id, values):
    """Update share fields."""
    return IMPL.share_update(context, share_id, values)
def share_get(context, share_id):
    """Get share by id."""
    return IMPL.share_get(context, share_id)
def share_get_all(context, filters=None, sort_key=None, sort_dir=None):
    """Get all shares."""
    return IMPL.share_get_all(
        context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
    )
def share_get_all_by_project(context, project_id, filters=None,
                             is_public=False, sort_key=None, sort_dir=None):
    """Returns all shares with given project ID."""
    return IMPL.share_get_all_by_project(
        context, project_id, filters=filters, is_public=is_public,
        sort_key=sort_key, sort_dir=sort_dir,
    )
def share_get_all_by_share_group_id(context, share_group_id,
                                    filters=None, sort_key=None,
                                    sort_dir=None):
    """Returns all shares with the given share group ID."""
    return IMPL.share_get_all_by_share_group_id(
        context, share_group_id, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
def share_get_all_by_share_server(context, share_server_id, filters=None,
                                  sort_key=None, sort_dir=None):
    """Returns all shares with given share server ID."""
    return IMPL.share_get_all_by_share_server(
        context, share_server_id, filters=filters, sort_key=sort_key,
        sort_dir=sort_dir,
    )
def share_delete(context, share_id):
    """Delete share."""
    return IMPL.share_delete(context, share_id)
###################
# Share access rule operations.
def share_access_create(context, values):
    """Allow access to share."""
    return IMPL.share_access_create(context, values)
def share_access_get(context, access_id):
    """Get share access rule."""
    return IMPL.share_access_get(context, access_id)
def share_access_get_all_for_share(context, share_id, filters=None):
    """Get all access rules for given share."""
    return IMPL.share_access_get_all_for_share(context, share_id,
                                               filters=filters)
def share_access_get_all_for_instance(context, instance_id, filters=None,
                                      with_share_access_data=True):
    """Get all access rules related to a certain share instance."""
    return IMPL.share_access_get_all_for_instance(
        context, instance_id, filters=filters,
        with_share_access_data=with_share_access_data)
def share_access_get_all_by_type_and_access(context, share_id, access_type,
                                            access):
    """Returns share access by given type and access."""
    return IMPL.share_access_get_all_by_type_and_access(
        context, share_id, access_type, access)
def share_access_check_for_existing_access(context, share_id, access_type,
                                           access_to):
    """Returns True if rule corresponding to the type and client exists."""
    return IMPL.share_access_check_for_existing_access(
        context, share_id, access_type, access_to)
def share_instance_access_create(context, values, share_instance_id):
    """Allow access to share instance."""
    return IMPL.share_instance_access_create(
        context, values, share_instance_id)
def share_instance_access_copy(context, share_id, instance_id):
    """Maps the existing access rules for the share to the instance in the DB.
    Adds the instance mapping to the share's access rules and
    returns the share's access rules.
    """
    return IMPL.share_instance_access_copy(context, share_id, instance_id)
def share_instance_access_get(context, access_id, instance_id,
                              with_share_access_data=True):
    """Get access rule mapping for share instance."""
    return IMPL.share_instance_access_get(
        context, access_id, instance_id,
        with_share_access_data=with_share_access_data)
def share_instance_access_update(context, access_id, instance_id, updates):
    """Update the access mapping row for a given share instance and access."""
    return IMPL.share_instance_access_update(
        context, access_id, instance_id, updates)
def share_instance_access_delete(context, mapping_id):
    """Deny access to share instance."""
    return IMPL.share_instance_access_delete(context, mapping_id)
def share_access_metadata_update(context, access_id, metadata):
    """Update metadata of share access rule."""
    return IMPL.share_access_metadata_update(context, access_id, metadata)
def share_access_metadata_delete(context, access_id, key):
    """Delete metadata of share access rule."""
    return IMPL.share_access_metadata_delete(context, access_id, key)
####################
# Share snapshot instance operations.
def share_snapshot_instance_update(context, instance_id, values):
    """Set the given properties on a share snapshot instance and update it.
    Raises NotFound if snapshot instance does not exist.
    """
    return IMPL.share_snapshot_instance_update(context, instance_id, values)
def share_snapshot_instance_create(context, snapshot_id, values):
    """Create a share snapshot instance for an existing snapshot."""
    return IMPL.share_snapshot_instance_create(
        context, snapshot_id, values)
def share_snapshot_instance_get(context, instance_id, with_share_data=False):
    """Get a snapshot instance or raise a NotFound exception."""
    return IMPL.share_snapshot_instance_get(
        context, instance_id, with_share_data=with_share_data)
def share_snapshot_instance_get_all_with_filters(context, filters,
                                                 with_share_data=False):
    """Get all snapshot instances satisfying provided filters."""
    return IMPL.share_snapshot_instance_get_all_with_filters(
        context, filters, with_share_data=with_share_data)
def share_snapshot_instance_delete(context, snapshot_instance_id):
    """Delete a share snapshot instance."""
    return IMPL.share_snapshot_instance_delete(context, snapshot_instance_id)
####################
# Share snapshot operations.
def share_snapshot_create(context, values):
    """Create a snapshot from the values dictionary."""
    return IMPL.share_snapshot_create(context, values)
def share_snapshot_get(context, snapshot_id):
    """Get a snapshot or raise if it does not exist."""
    return IMPL.share_snapshot_get(context, snapshot_id)
def share_snapshot_get_all(context, filters=None, sort_key=None,
                           sort_dir=None):
    """Get all snapshots."""
    return IMPL.share_snapshot_get_all(
        context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
    )
def share_snapshot_get_all_by_project(context, project_id, filters=None,
                                      sort_key=None, sort_dir=None):
    """Get all snapshots belonging to a project."""
    return IMPL.share_snapshot_get_all_by_project(
        context, project_id, filters=filters, sort_key=sort_key,
        sort_dir=sort_dir,
    )
def share_snapshot_get_all_for_share(context, share_id, filters=None,
                                     sort_key=None, sort_dir=None):
    """Get all snapshots for a share."""
    return IMPL.share_snapshot_get_all_for_share(
        context, share_id, filters=filters, sort_key=sort_key,
        sort_dir=sort_dir,
    )
def share_snapshot_get_latest_for_share(context, share_id):
    """Get the most recent snapshot for a share."""
    return IMPL.share_snapshot_get_latest_for_share(context, share_id)
def share_snapshot_update(context, snapshot_id, values):
    """Set the given properties on a snapshot and update it.
    Raises NotFound if snapshot does not exist.
    """
    return IMPL.share_snapshot_update(context, snapshot_id, values)
###################
# Share snapshot access rule and export location operations.
def share_snapshot_access_create(context, values):
    """Create a share snapshot access from the values dictionary."""
    return IMPL.share_snapshot_access_create(context, values)
def share_snapshot_access_get(context, access_id):
    """Get share snapshot access rule from given access_id."""
    return IMPL.share_snapshot_access_get(context, access_id)
def share_snapshot_access_get_all_for_snapshot_instance(
        context, snapshot_instance_id, session=None):
    """Get all access rules related to a certain snapshot instance."""
    return IMPL.share_snapshot_access_get_all_for_snapshot_instance(
        context, snapshot_instance_id, session)
def share_snapshot_access_get_all_for_share_snapshot(context,
                                                     share_snapshot_id,
                                                     filters):
    """Get all access rules for a given share snapshot according to filters."""
    return IMPL.share_snapshot_access_get_all_for_share_snapshot(
        context, share_snapshot_id, filters)
def share_snapshot_check_for_existing_access(context, share_snapshot_id,
                                             access_type, access_to):
    """Returns True if rule corresponding to the type and client exists."""
    return IMPL.share_snapshot_check_for_existing_access(context,
                                                         share_snapshot_id,
                                                         access_type,
                                                         access_to)
def share_snapshot_export_locations_get(context, snapshot_id):
    """Get all export locations for a given share snapshot."""
    return IMPL.share_snapshot_export_locations_get(context, snapshot_id)
def share_snapshot_instance_access_update(
        context, access_id, instance_id, updates):
    """Update the state of the share snapshot instance access."""
    return IMPL.share_snapshot_instance_access_update(
        context, access_id, instance_id, updates)
def share_snapshot_instance_access_get(context, share_snapshot_instance_id,
                                       access_id):
    """Get the share snapshot instance access related to given ids."""
    return IMPL.share_snapshot_instance_access_get(
        context, share_snapshot_instance_id, access_id)
def share_snapshot_instance_access_delete(context, access_id,
                                          snapshot_instance_id):
    """Delete share snapshot instance access given its id."""
    return IMPL.share_snapshot_instance_access_delete(
        context, access_id, snapshot_instance_id)
def share_snapshot_instance_export_location_create(context, values):
    """Create a share snapshot instance export location."""
    return IMPL.share_snapshot_instance_export_location_create(context, values)
def share_snapshot_instance_export_locations_get_all(
        context, share_snapshot_instance_id):
    """Get the share snapshot instance export locations for given id."""
    return IMPL.share_snapshot_instance_export_locations_get_all(
        context, share_snapshot_instance_id)
def share_snapshot_instance_export_location_get(context, el_id):
    """Get the share snapshot instance export location for given id."""
    return IMPL.share_snapshot_instance_export_location_get(
        context, el_id)
def share_snapshot_instance_export_location_delete(context, el_id):
    """Delete share snapshot instance export location given its id."""
    return IMPL.share_snapshot_instance_export_location_delete(context, el_id)
###################
# Security service operations.
def security_service_create(context, values):
    """Create security service DB record."""
    return IMPL.security_service_create(context, values)
def security_service_delete(context, id):
    """Delete security service DB record."""
    return IMPL.security_service_delete(context, id)
def security_service_update(context, id, values):
    """Update security service DB record."""
    return IMPL.security_service_update(context, id, values)
def security_service_get(context, id):
    """Get security service DB record."""
    return IMPL.security_service_get(context, id)
def security_service_get_all(context):
    """Get all security service DB records."""
    return IMPL.security_service_get_all(context)
def security_service_get_all_by_project(context, project_id):
    """Get all security service DB records for the given project."""
    return IMPL.security_service_get_all_by_project(context, project_id)
####################
# Share metadata operations.
def share_metadata_get(context, share_id):
    """Get all metadata for a share."""
    return IMPL.share_metadata_get(context, share_id)
def share_metadata_delete(context, share_id, key):
    """Delete the given metadata item."""
    # Propagate the backend result, consistent with every other delegating
    # function in this module (previously the result was discarded).
    return IMPL.share_metadata_delete(context, share_id, key)
def share_metadata_update(context, share, metadata, delete):
    """Update metadata if it exists, otherwise create it.
    :param delete: if True, metadata items not present in ``metadata`` are
        removed as well.
    """
    return IMPL.share_metadata_update(context, share, metadata, delete)
###################
# Share export location operations.
def share_export_location_get_by_uuid(context, export_location_uuid,
                                      ignore_secondary_replicas=False):
    """Get specific export location of a share."""
    return IMPL.share_export_location_get_by_uuid(
        context, export_location_uuid,
        ignore_secondary_replicas=ignore_secondary_replicas)
def share_export_locations_get(context, share_id):
    """Get all export locations of a share."""
    return IMPL.share_export_locations_get(context, share_id)
def share_export_locations_get_by_share_id(context, share_id,
                                           include_admin_only=True,
                                           ignore_migration_destination=False,
                                           ignore_secondary_replicas=False):
    """Get all export locations of a share by its ID."""
    return IMPL.share_export_locations_get_by_share_id(
        context, share_id, include_admin_only=include_admin_only,
        ignore_migration_destination=ignore_migration_destination,
        ignore_secondary_replicas=ignore_secondary_replicas)
def share_export_locations_get_by_share_instance_id(context,
                                                    share_instance_id,
                                                    include_admin_only=True):
    """Get all export locations of a share instance by its ID."""
    return IMPL.share_export_locations_get_by_share_instance_id(
        context, share_instance_id, include_admin_only=include_admin_only)
def share_export_locations_update(context, share_instance_id, export_locations,
                                  delete=True):
    """Update export locations of a share instance."""
    return IMPL.share_export_locations_update(
        context, share_instance_id, export_locations, delete)
####################
# Export location metadata operations.
def export_location_metadata_get(context, export_location_uuid, session=None):
    """Get all metadata of an export location."""
    return IMPL.export_location_metadata_get(
        context, export_location_uuid, session=session)
def export_location_metadata_delete(context, export_location_uuid, keys,
                                    session=None):
    """Delete metadata of an export location."""
    return IMPL.export_location_metadata_delete(
        context, export_location_uuid, keys, session=session)
def export_location_metadata_update(context, export_location_uuid, metadata,
                                    delete, session=None):
    """Update metadata of an export location."""
    return IMPL.export_location_metadata_update(
        context, export_location_uuid, metadata, delete, session=session)
####################
# Share network operations.
def share_network_create(context, values):
    """Create a share network DB record."""
    return IMPL.share_network_create(context, values)
def share_network_delete(context, id):
    """Delete a share network DB record."""
    return IMPL.share_network_delete(context, id)
def share_network_update(context, id, values):
    """Update a share network DB record."""
    return IMPL.share_network_update(context, id, values)
def share_network_get(context, id):
    """Get requested share network DB record."""
    return IMPL.share_network_get(context, id)
def share_network_get_all(context):
    """Get all share network DB records."""
    return IMPL.share_network_get_all(context)
def share_network_get_all_by_project(context, project_id):
    """Get all share network DB records for the given project."""
    return IMPL.share_network_get_all_by_project(context, project_id)
def share_network_get_all_by_security_service(context, security_service_id):
    """Get all share network DB records for the given security service."""
    return IMPL.share_network_get_all_by_security_service(
        context, security_service_id)
def share_network_add_security_service(context, id, security_service_id):
    """Associate a security service with a share network."""
    return IMPL.share_network_add_security_service(context,
                                                   id,
                                                   security_service_id)
def share_network_remove_security_service(context, id, security_service_id):
    """Dissociate a security service from a share network."""
    return IMPL.share_network_remove_security_service(context,
                                                      id,
                                                      security_service_id)
def count_share_networks(context, project_id, user_id=None,
                         share_type_id=None, session=None):
    """Count share networks that belong to the given project."""
    return IMPL.count_share_networks(
        context, project_id, user_id=user_id, share_type_id=share_type_id,
        session=session,
    )
##################
# Network allocation operations.
def network_allocation_create(context, values):
    """Create a network allocation DB record."""
    return IMPL.network_allocation_create(context, values)
def network_allocation_delete(context, id):
    """Delete a network allocation DB record."""
    return IMPL.network_allocation_delete(context, id)
def network_allocation_update(context, id, values):
    """Update a network allocation DB record."""
    return IMPL.network_allocation_update(context, id, values)
def network_allocations_get_for_share_server(context, share_server_id,
                                             session=None, label=None):
    """Get network allocations for share server."""
    return IMPL.network_allocations_get_for_share_server(
        context, share_server_id, label=label, session=session)
def network_allocations_get_by_ip_address(context, ip_address):
    """Get network allocations by IP address."""
    return IMPL.network_allocations_get_by_ip_address(context, ip_address)
##################
# Share server operations.
def share_server_create(context, values):
    """Create share server DB record."""
    return IMPL.share_server_create(context, values)
def share_server_delete(context, id):
    """Delete share server DB record."""
    return IMPL.share_server_delete(context, id)
def share_server_update(context, id, values):
    """Update share server DB record."""
    return IMPL.share_server_update(context, id, values)
def share_server_get(context, id, session=None):
    """Get share server DB record by ID."""
    return IMPL.share_server_get(context, id, session=session)
def share_server_get_all_by_host_and_share_net_valid(context, host,
                                                     share_net_id,
                                                     session=None):
    """Get share server DB records by host and share network, not in error."""
    return IMPL.share_server_get_all_by_host_and_share_net_valid(
        context, host, share_net_id, session=session)
def share_server_get_all(context):
    """Get all share server DB records."""
    return IMPL.share_server_get_all(context)
def share_server_get_all_by_host(context, host):
    """Get all share servers related to particular host."""
    return IMPL.share_server_get_all_by_host(context, host)
def share_server_get_all_unused_deletable(context, host, updated_before):
    """Get all free share servers DB records."""
    return IMPL.share_server_get_all_unused_deletable(context, host,
                                                      updated_before)
def share_server_backend_details_set(context, share_server_id, server_details):
    """Create DB record with backend details."""
    return IMPL.share_server_backend_details_set(context, share_server_id,
                                                 server_details)
##################
# Share type operations.
def share_type_create(context, values, projects=None):
    """Create a new share type."""
    return IMPL.share_type_create(context, values, projects)
def share_type_get_all(context, inactive=False, filters=None):
    """Get all share types.
    :param context: context to query under
    :param inactive: Include inactive share types to the result set
    :param filters: Filters for the query in the form of key/value.
        :is_public: Filter share types based on visibility:
            * **True**: List public share types only
            * **False**: List private share types only
            * **None**: List both public and private share types
    :returns: list of matching share types
    """
    return IMPL.share_type_get_all(context, inactive, filters)
def share_type_get(context, type_id, inactive=False, expected_fields=None):
    """Get share type by id.
    :param context: context to query under
    :param type_id: share type id to get.
    :param inactive: Consider inactive share types when searching
    :param expected_fields: Return those additional fields.
        Supported fields are: projects.
    :returns: share type
    """
    return IMPL.share_type_get(context, type_id, inactive, expected_fields)
def share_type_get_by_name(context, name):
    """Get share type by name."""
    return IMPL.share_type_get_by_name(context, name)
def share_type_get_by_name_or_id(context, name_or_id):
    """Get share type by name or ID and return None if not found."""
    return IMPL.share_type_get_by_name_or_id(context, name_or_id)
def share_type_access_get_all(context, type_id):
    """Get all share type access of a share type."""
    return IMPL.share_type_access_get_all(context, type_id)
def share_type_access_add(context, type_id, project_id):
    """Add share type access for project."""
    return IMPL.share_type_access_add(context, type_id, project_id)
def share_type_access_remove(context, type_id, project_id):
    """Remove share type access for project."""
    return IMPL.share_type_access_remove(context, type_id, project_id)
def share_type_destroy(context, id):
    """Delete a share type."""
    return IMPL.share_type_destroy(context, id)
####################
# Share type extra specs and driver private data operations.
def share_type_extra_specs_get(context, share_type_id):
    """Get all extra specs for a share type."""
    return IMPL.share_type_extra_specs_get(context, share_type_id)
def share_type_extra_specs_delete(context, share_type_id, key):
    """Delete the given extra specs item."""
    return IMPL.share_type_extra_specs_delete(context, share_type_id, key)
def share_type_extra_specs_update_or_create(context, share_type_id,
                                            extra_specs):
    """Create or update share type extra specs.
    This adds or modifies the key/value pairs specified in the extra
    specs dict argument.
    """
    return IMPL.share_type_extra_specs_update_or_create(context,
                                                        share_type_id,
                                                        extra_specs)
def driver_private_data_get(context, entity_id, key=None, default=None):
    """Get one, list or all key-value pairs for given entity_id."""
    return IMPL.driver_private_data_get(context, entity_id, key, default)
def driver_private_data_update(context, entity_id, details,
                               delete_existing=False):
    """Update key-value pairs for given entity_id."""
    return IMPL.driver_private_data_update(context, entity_id, details,
                                           delete_existing)
def driver_private_data_delete(context, entity_id, key=None):
    """Remove one, list or all key-value pairs for given entity_id."""
    return IMPL.driver_private_data_delete(context, entity_id, key)
####################
# Availability zone operations.
def availability_zone_get(context, id_or_name):
    """Get availability zone by name or id."""
    return IMPL.availability_zone_get(context, id_or_name)
def availability_zone_get_all(context):
    """Get all active availability zones."""
    return IMPL.availability_zone_get_all(context)
####################
# Share group operations.
def share_group_get(context, share_group_id):
    """Get a share group or raise if it does not exist."""
    return IMPL.share_group_get(context, share_group_id)
def share_group_get_all(context, detailed=True, filters=None, sort_key=None,
                        sort_dir=None):
    """Get all share groups."""
    return IMPL.share_group_get_all(
        context, detailed=detailed, filters=filters, sort_key=sort_key,
        sort_dir=sort_dir)
def share_group_get_all_by_host(context, host, detailed=True, filters=None,
                                sort_key=None, sort_dir=None):
    """Get all share groups belonging to a host."""
    return IMPL.share_group_get_all_by_host(
        context, host, detailed=detailed, filters=filters, sort_key=sort_key,
        sort_dir=sort_dir)
def share_group_create(context, values):
    """Create a share group from the values dictionary."""
    return IMPL.share_group_create(context, values)
def share_group_get_all_by_share_server(context, share_server_id,
                                        filters=None, sort_key=None,
                                        sort_dir=None):
    """Get all share groups associated with a share server."""
    return IMPL.share_group_get_all_by_share_server(
        context, share_server_id, filters=filters, sort_key=sort_key,
        sort_dir=sort_dir)
def share_group_get_all_by_project(context, project_id, detailed=True,
                                   filters=None, sort_key=None,
                                   sort_dir=None):
    """Get all share groups belonging to a project."""
    return IMPL.share_group_get_all_by_project(
        context, project_id, detailed=detailed, filters=filters,
        sort_key=sort_key, sort_dir=sort_dir)
def share_group_update(context, share_group_id, values):
    """Set the given properties on a share group and update it.
    Raises NotFound if share group does not exist.
    """
    return IMPL.share_group_update(context, share_group_id, values)
def share_group_destroy(context, share_group_id):
    """Destroy the share group or raise if it does not exist."""
    return IMPL.share_group_destroy(context, share_group_id)
def count_shares_in_share_group(context, share_group_id):
    """Returns the number of undeleted shares with the specified group."""
    return IMPL.count_shares_in_share_group(context, share_group_id)
def get_all_shares_by_share_group(context, share_group_id):
    """Get all shares that belong to the given share group."""
    return IMPL.get_all_shares_by_share_group(context, share_group_id)
def count_share_group_snapshots_in_share_group(context, share_group_id):
    """Returns the number of sg snapshots with the specified share group."""
    return IMPL.count_share_group_snapshots_in_share_group(
        context, share_group_id)
def count_share_groups_in_share_network(context, share_network_id,
                                        session=None):
    """Return the number of groups with the specified share network."""
    # NOTE(review): ``session`` is accepted but not forwarded to the backend
    # call below — confirm whether this is intentional.
    return IMPL.count_share_groups_in_share_network(context, share_network_id)
def count_share_group_snapshot_members_in_share(context, share_id,
                                                session=None):
    """Returns the number of group snapshot members linked to the share."""
    # NOTE(review): ``session`` is accepted but not forwarded to the backend
    # call below — confirm whether this is intentional.
    return IMPL.count_share_group_snapshot_members_in_share(context, share_id)
def share_group_snapshot_get(context, share_group_snapshot_id):
"""Get a share group snapshot."""
return IMPL.share_group_snapshot_get(context, share_group_snapshot_id)
def share_group_snapshot_get_all(context, detailed=True, filters=None,
sort_key=None, sort_dir=None):
"""Get all share group snapshots."""
return IMPL.share_group_snapshot_get_all(
context, detailed=detailed, filters=filters, sort_key=sort_key,
sort_dir=sort_dir)
def share_group_snapshot_get_all_by_project(context, project_id, detailed=True,
filters=None, sort_key=None,
sort_dir=None):
"""Get all share group snapshots belonging to a project."""
return IMPL.share_group_snapshot_get_all_by_project(
context, project_id, detailed=detailed, filters=filters,
sort_key=sort_key, sort_dir=sort_dir)
def share_group_snapshot_create(context, values):
"""Create a share group snapshot from the values dictionary."""
return IMPL.share_group_snapshot_create(context, values)
def share_group_snapshot_update(context, share_group_snapshot_id, values):
"""Set the given properties on a share group snapshot and update it.
Raises NotFound if share group snapshot does not exist.
"""
return IMPL.share_group_snapshot_update(
context, share_group_snapshot_id, values)
def share_group_snapshot_destroy(context, share_group_snapshot_id):
"""Destroy the share_group_snapshot or raise if it does not exist."""
return IMPL.share_group_snapshot_destroy(context, share_group_snapshot_id)
def share_group_snapshot_members_get_all(context, share_group_snapshot_id):
"""Return the members of a share group snapshot."""
return IMPL.share_group_snapshot_members_get_all(
context, share_group_snapshot_id)
def share_group_snapshot_member_create(context, values):
"""Create a share group snapshot member from the values dictionary."""
return IMPL.share_group_snapshot_member_create(context, values)
def share_group_snapshot_member_update(context, member_id, values):
"""Set the given properties on a share group snapshot member and update it.
Raises NotFound if share_group_snapshot member does not exist.
"""
return IMPL.share_group_snapshot_member_update(context, member_id, values)
####################
# Share-replica, group-type, message and backend-info delegation wrappers.
def share_replicas_get_all(context, with_share_server=False,
                           with_share_data=False):
    """Returns all share replicas regardless of share."""
    return IMPL.share_replicas_get_all(
        context, with_share_server=with_share_server,
        with_share_data=with_share_data)
def share_replicas_get_all_by_share(context, share_id, with_share_server=False,
                                    with_share_data=False):
    """Returns all share replicas for a given share."""
    return IMPL.share_replicas_get_all_by_share(
        context, share_id, with_share_server=with_share_server,
        with_share_data=with_share_data)
def share_replicas_get_available_active_replica(context, share_id,
                                                with_share_server=False,
                                                with_share_data=False):
    """Returns an active replica for a given share."""
    return IMPL.share_replicas_get_available_active_replica(
        context, share_id, with_share_server=with_share_server,
        with_share_data=with_share_data)
def share_replica_get(context, replica_id, with_share_server=False,
                      with_share_data=False):
    """Get share replica by id."""
    return IMPL.share_replica_get(
        context, replica_id, with_share_server=with_share_server,
        with_share_data=with_share_data)
def share_replica_update(context, share_replica_id, values,
                         with_share_data=False):
    """Updates a share replica with given values."""
    return IMPL.share_replica_update(context, share_replica_id, values,
                                     with_share_data=with_share_data)
def share_replica_delete(context, share_replica_id):
    """Deletes a share replica."""
    return IMPL.share_replica_delete(context, share_replica_id)
def purge_deleted_records(context, age_in_days):
    """Purge deleted rows older than given age from all tables
    :raises: InvalidParameterValue if age_in_days is incorrect.
    """
    return IMPL.purge_deleted_records(context, age_in_days=age_in_days)
####################
def share_group_type_create(context, values, projects=None):
    """Create a new share group type."""
    return IMPL.share_group_type_create(context, values, projects)
def share_group_type_get_all(context, inactive=False, filters=None):
    """Get all share group types.
    :param context: context to query under
    :param inactive: Include inactive share group types to the result set
    :param filters: Filters for the query in the form of key/value.
        :is_public: Filter share group types based on visibility:
            * **True**: List public group types only
            * **False**: List private group types only
            * **None**: List both public and private group types
    :returns: list of matching share group types
    """
    return IMPL.share_group_type_get_all(context, inactive, filters)
def share_group_type_get(context, type_id, inactive=False,
                         expected_fields=None):
    """Get share_group type by id.
    :param context: context to query under
    :param type_id: group type id to get.
    :param inactive: Consider inactive group types when searching
    :param expected_fields: Return those additional fields.
        Supported fields are: projects.
    :returns: share group type
    """
    return IMPL.share_group_type_get(
        context, type_id, inactive, expected_fields)
def share_group_type_get_by_name(context, name):
    """Get share group type by name."""
    return IMPL.share_group_type_get_by_name(context, name)
def share_group_type_access_get_all(context, type_id):
    """Get all share group type access of a share group type."""
    return IMPL.share_group_type_access_get_all(context, type_id)
def share_group_type_access_add(context, type_id, project_id):
    """Add share group type access for project."""
    return IMPL.share_group_type_access_add(context, type_id, project_id)
def share_group_type_access_remove(context, type_id, project_id):
    """Remove share group type access for project."""
    return IMPL.share_group_type_access_remove(context, type_id, project_id)
def share_group_type_destroy(context, type_id):
    """Delete a share group type."""
    return IMPL.share_group_type_destroy(context, type_id)
def share_group_type_specs_get(context, type_id):
    """Get all group specs for a share group type."""
    return IMPL.share_group_type_specs_get(context, type_id)
def share_group_type_specs_delete(context, type_id, key):
    """Delete the given group specs item."""
    return IMPL.share_group_type_specs_delete(context, type_id, key)
def share_group_type_specs_update_or_create(context, type_id, group_specs):
    """Create or update share group type specs.
    This adds or modifies the key/value pairs specified in the group
    specs dict argument.
    """
    return IMPL.share_group_type_specs_update_or_create(
        context, type_id, group_specs)
####################
def message_get(context, message_id):
    """Return a message with the specified ID."""
    return IMPL.message_get(context, message_id)
def message_get_all(context, filters=None, sort_key=None, sort_dir=None):
    """Returns all messages with the project of the specified context."""
    return IMPL.message_get_all(context, filters=filters, sort_key=sort_key,
                                sort_dir=sort_dir)
def message_create(context, values):
    """Creates a new message with the specified values."""
    return IMPL.message_create(context, values)
def message_destroy(context, message_id):
    """Deletes message with the specified ID."""
    return IMPL.message_destroy(context, message_id)
def cleanup_expired_messages(context):
    """Soft delete expired messages"""
    return IMPL.cleanup_expired_messages(context)
def backend_info_get(context, host):
    """Get hash info for given host."""
    return IMPL.backend_info_get(context, host)
def backend_info_update(context, host, value=None,
                        delete_existing=False):
    """Update hash info for host."""
    return IMPL.backend_info_update(context, host=host, value=value,
                                    delete_existing=delete_existing)
| 36.389413 | 79 | 0.700963 |
ace3f825a5df92980a854924af18fa9cb316ce91 | 2,644 | py | Python | adminmgr/media/code/A2/python/task/BD_188_1000_1767_XfZCfQz.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A2/python/task/BD_188_1000_1767_XfZCfQz.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A2/python/task/BD_188_1000_1767_XfZCfQz.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | from __future__ import print_function
import re
import sys
from operator import add
from pyspark.sql import SparkSession
def computeContribs(urls, rank):
    """Yield ``(url, contribution)`` pairs, splitting *rank* evenly.

    Each outgoing neighbour receives an equal share of this page's rank.
    An empty neighbour list yields nothing.
    """
    neighbour_count = len(urls)
    for target in urls:
        yield (target, float(rank) / float(neighbour_count))
def compute(key, val):
    """Return ``(key, val)`` with *val* floored at 1.

    Values greater than 1 pass through untouched; everything else is
    replaced by the initial rank of 1.
    """
    return (key, val) if val > 1 else (key, 1)
def parseNeighbors(urls):
    """Parse a CSV record into ``(source, field3 / field4)``.

    The record layout is ``source,dest,numerator,denominator``; the ratio
    of the last two numeric fields becomes the initial weight.
    """
    fields = urls.split(',')
    return fields[0], float(fields[2]) / float(fields[3])
def parseNeigbors1(urls):
    """Parse a CSV record into its first two fields: ``(source, dest)``.

    (The public name keeps its historical misspelling; callers use it.)
    """
    fields = urls.split(',')
    return fields[0], fields[1]
if __name__ == "__main__":
    # Usage: pagerank <edges-file> <iterations> <damping-percent>
    if len(sys.argv) != 4:
        print("Usage: pagerank <file> <iterations>", file=sys.stderr)
        sys.exit(-1)
    # Initialize the spark context.
    spark = SparkSession\
        .builder\
        .appName("PythonPageRank")\
        .getOrCreate()
    # Damping factor supplied as a percentage; 0 falls back to 0.8.
    some_value = float((float(sys.argv[3]))/100)
    if (some_value == 0):
        some_value = 0.8
    lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
    # links2: per-source sum of initial weights; links1: adjacency lists.
    links2 = lines.map(lambda urls: parseNeighbors(urls)).groupByKey().mapValues(sum).cache()
    ranks=links2.map(lambda x:compute(x[0],x[1]))
    prevranks=links2.map(lambda x:compute(x[0],x[1]))
    links1=lines.map(lambda urls: parseNeigbors1(urls)).groupByKey().cache()
    count_value = 0
    count = 0
    # t == True selects the run-until-convergence path below; a non-zero
    # iteration count runs the fixed-iteration loop instead.
    t = True
    if(int(sys.argv[2]) != 0):
        t = False
    for iteration in range(int(sys.argv[2])):
        contribs = links1.join(ranks).flatMap(lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
        ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank * (some_value) + (1-some_value))
        contribs = links1.join(ranks).flatMap(lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
        prevranks = contribs.reduceByKey(add).mapValues(lambda rank: rank * (float(some_value)) + (float(1-some_value)))
    while(t):
        count = 0
        count_value = 0
        contribs = links1.join(prevranks).flatMap(lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
        ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank * (some_value) + (1-some_value))
        # Converged when every rank moved by less than 1e-4 since last pass.
        temp = ranks.join(prevranks)
        for i in temp.collect():
            if(abs(i[1][0]-i[1][1])<0.0001):
                count_value+=1
        for i in ranks.collect():
            count+=1
        if(count == count_value):
            t = False
        prevranks = ranks
    # Emit ranks sorted by descending score, then name, to 12 decimals.
    result=sorted(ranks.collect(),key=lambda x:(-x[1],x[0]))
    for (link, rank) in result:
        print(link,"{0:.12f}".format(rank),sep=",")
        #print("%s,%s" % (link, rank))
    spark.stop()
| 31.855422 | 130 | 0.628215 |
ace3f8458e4056e051ebb86bceafeac90ad24661 | 3,319 | py | Python | Lib/lib2to3/fixes/fix_except.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | 1 | 2021-12-26T22:20:34.000Z | 2021-12-26T22:20:34.000Z | Lib/lib2to3/fixes/fix_except.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | null | null | null | Lib/lib2to3/fixes/fix_except.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | 2 | 2018-08-06T04:37:38.000Z | 2022-02-27T18:07:12.000Z | """Fixer for except statements with named exceptions.
The following cases will be converted:
- "except E, T:" where T is a name:
except E as T:
- "except E, T:" where T is not a name, tuple or list:
except E as t:
T = t
This is done because the target of an "except" clause must be a
name.
- "except E, T:" where T is a tuple or list literal:
except E as t:
T = t.args
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
def find_excepts(nodes):
    """Yield ``(except_clause, suite)`` pairs found among *nodes*.

    The suite is the node two positions after the clause, skipping the
    intervening ``':'`` token.
    """
    for position, candidate in enumerate(nodes):
        if candidate.type != syms.except_clause:
            continue
        if candidate.children[0].value == 'except':
            yield (candidate, nodes[position + 2])
class FixExcept(fixer_base.BaseFix):
    """Rewrite ``except E, T:`` clauses into ``except E as T:`` form."""
    PATTERN = """
    try_stmt< 'try' ':' (simple_stmt | suite)
                  cleanup=(except_clause ':' (simple_stmt | suite))+
                  tail=(['except' ':' (simple_stmt | suite)]
                        ['else' ':' (simple_stmt | suite)]
                        ['finally' ':' (simple_stmt | suite)]) >
    """
    def transform(self, node, results):
        syms = self.syms
        # Clone everything we reattach so the original tree is not aliased.
        tail = [n.clone() for n in results["tail"]]
        try_cleanup = [ch.clone() for ch in results["cleanup"]]
        for except_clause, e_suite in find_excepts(try_cleanup):
            # Four children means the clause has the form "except E, T".
            if len(except_clause.children) == 4:
                (E, comma, N) = except_clause.children[1:4]
                comma.replace(Name("as", prefix=" "))
                if N.type != token.NAME:
                    # Generate a new N for the except clause
                    new_N = Name(self.new_name(), prefix=" ")
                    target = N.clone()
                    target.prefix = ""
                    N.replace(new_N)
                    new_N = new_N.clone()
                    # Insert "old_N = new_N" as the first statement in
                    #  the except body. This loop skips leading whitespace
                    #  and indents
                    #TODO(cwinter) suite-cleanup
                    suite_stmts = e_suite.children
                    for i, stmt in enumerate(suite_stmts):
                        if isinstance(stmt, pytree.Node):
                            break
                    #  The assignment is different if old_N is a tuple or list
                    #  In that case, the assignment is old_N = new_N.args
                    if is_tuple(N) or is_list(N):
                        assign = Assign(target, Attr(new_N, Name('args')))
                    else:
                        assign = Assign(target, new_N)
                    #TODO(cwinter) stopgap until children becomes a smart list
                    for child in reversed(suite_stmts[:i]):
                        e_suite.insert_child(0, child)
                    e_suite.insert_child(i, assign)
                elif N.prefix == "":
                    # No space after a comma is legal; no space after "as",
                    # not so much.
                    N.prefix = " "
        #TODO(cwinter) fix this when children becomes a smart list
        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
        return pytree.Node(node.type, children)
| 35.688172 | 78 | 0.517023 |
ace3f91d6e8a6d8003a957fa555dd222ec433843 | 176 | py | Python | {{cookiecutter.project_name}}/tests/test_entrypoint1.py | TvanMeer/cookiecutter-python-package | 4ffbe8658bbd371f3e505ba389f02dbe9c1d74f1 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/tests/test_entrypoint1.py | TvanMeer/cookiecutter-python-package | 4ffbe8658bbd371f3e505ba389f02dbe9c1d74f1 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/tests/test_entrypoint1.py | TvanMeer/cookiecutter-python-package | 4ffbe8658bbd371f3e505ba389f02dbe9c1d74f1 | [
"MIT"
] | null | null | null | import pytest
from {{cookiecutter.project_name}}.module1.entrypoint1 import X
@pytest.fixture
def x():
return X()
def test_do_something(x):
assert x.do_something() | 14.666667 | 63 | 0.732955 |
ace3f972f578856f4e7c7065c33959b94928203b | 650 | py | Python | extract_reordered.py | vappiah/bacterial-genomics | 66d0311e9422f6a020bbd3d7ddda3d9afc26a161 | [
"MIT"
] | null | null | null | extract_reordered.py | vappiah/bacterial-genomics | 66d0311e9422f6a020bbd3d7ddda3d9afc26a161 | [
"MIT"
] | null | null | null | extract_reordered.py | vappiah/bacterial-genomics | 66d0311e9422f6a020bbd3d7ddda3d9afc26a161 | [
"MIT"
] | null | null | null |
import sys
from Bio.SeqUtils import GC
from Bio import SeqIO
# Command line: <fasta file> <record-id substring to match>
fastafile=sys.argv[1]
ID=sys.argv[2]
#read fasta file
allseq=[i for i in SeqIO.parse(fastafile,'fasta')]
#extract sequence with the RagTag label
# Takes the first record whose id contains both 'RagTag' and the given ID;
# raises IndexError if no record matches.
reordered=[i for i in allseq if 'RagTag' in i.id and ID in i.id][0]
# NOTE(review): output id/filename are hardcoded to 'P7741' regardless of
# the ID argument -- confirm this is intended.
reordered.id='P7741'
reordered.name=''
reordered.description=''
# NOTE(review): ``gc`` (raw G+C count) is computed but never used below.
gc=reordered.seq.count('G')+ reordered.seq.count('C')
gc_percent=GC(reordered.seq)
sequence_length=len(reordered.seq)
print('Sequence Length: %d bp'%sequence_length)
print('GC Percent: %0.2f'%gc_percent)
SeqIO.write(reordered,'P7741.reordered.fasta','fasta')
print('draft genome sequence extracted')
| 20.967742 | 67 | 0.749231 |
ace3fa50deb2cec9632caa53efd04692957a6796 | 1,157 | py | Python | exs/mundo_2/python/056.py | QuatroQuatros/exercicios-CeV | c9b995b717fe1dd2c2eee3557db0161390bc78b0 | [
"MIT"
] | 45 | 2021-01-02T18:36:01.000Z | 2022-03-26T19:46:47.000Z | exs/mundo_2/python/056.py | QuatroQuatros/exercicios-CeV | c9b995b717fe1dd2c2eee3557db0161390bc78b0 | [
"MIT"
] | 24 | 2020-12-31T17:23:16.000Z | 2021-03-11T19:44:36.000Z | exs/mundo_2/python/056.py | QuatroQuatros/exercicios-CeV | c9b995b717fe1dd2c2eee3557db0161390bc78b0 | [
"MIT"
] | 28 | 2020-12-30T15:57:16.000Z | 2022-03-26T19:46:49.000Z | """
Desafio 056
Problema: Desenvolva um programa que leia o nome, idade e sexo
de 4 pessoas. No final do programa,
mostre: A média de idade do grupo.
Qual é o nome do homem mais velho.
Quantas mulheres tem menos de 20 anos.
Resolução do problemas:
"""
# Accumulators: total age, oldest man's name/age, count of women under 20.
idadeMedia = 0
nomeHomemMaisVelho = ''
idadeHomemVelho = 0
qtdMulherMenor20 = 0
# Read name, age and sex for four people interactively.
for c in range(4):
    nome = input('informe o nome da {}º pessoa: '.format(c + 1)).strip().capitalize()
    idade = int(input('Informe a idade da {}º pessoa: '.format(c + 1)))
    sexo = input('Informe o sexo da {}º pessoa: [M/F]'.format(c + 1)).strip().upper()
    print()
    idadeMedia += idade
    if sexo == 'M':
        # Track the oldest man seen so far.
        if idade > idadeHomemVelho:
            idadeHomemVelho = idade
            nomeHomemMaisVelho = nome
    elif sexo == 'F':
        if idade < 20:
            qtdMulherMenor20 += 1
# Report: average age, oldest man, number of women under 20.
print('A idade média do grupo é de {} anos'.format(idadeMedia / 4))
print('O {} é o homem mais velho do grupo, com {} anos.'.format(nomeHomemMaisVelho, idadeHomemVelho))
print('No grupo existe {} mulher(es) com menos de 20 anos'.format(qtdMulherMenor20))
| 32.138889 | 101 | 0.624028 |
ace3fb3186df37769acb3f3a07fb97be83174404 | 2,582 | py | Python | tools/xpack-riscv-none-embed-gcc-8.3.0-1.2/riscv-none-embed/lib/rv32eac/ilp32e/libstdc++.a-gdb.py | shllvii/tinyriscv | 3bfef2b013691063dd0712aa415f0a732be3a6b9 | [
"Apache-2.0"
] | 1 | 2021-02-02T06:29:48.000Z | 2021-02-02T06:29:48.000Z | tools/xpack-riscv-none-embed-gcc-8.3.0-1.2/riscv-none-embed/lib/rv32eac/ilp32e/libstdc++.a-gdb.py | shllvii/tinyriscv | 3bfef2b013691063dd0712aa415f0a732be3a6b9 | [
"Apache-2.0"
] | null | null | null | tools/xpack-riscv-none-embed-gcc-8.3.0-1.2/riscv-none-embed/lib/rv32eac/ilp32e/libstdc++.a-gdb.py | shllvii/tinyriscv | 3bfef2b013691063dd0712aa415f0a732be3a6b9 | [
"Apache-2.0"
] | null | null | null | # -*- python -*-
# Copyright (C) 2009-2018 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
# Absolute build-machine paths baked in at toolchain build time; the code
# below converts them to paths relative to the loaded objfile.
pythondir = '/Host/home/ilg/Work/riscv-none-embed-gcc-8.3.0-1.2/linux-x64/install/riscv-none-embed-gcc/share/gcc-riscv-none-embed'
libdir = '/Host/home/ilg/Work/riscv-none-embed-gcc-8.3.0-1.2/linux-x64/install/riscv-none-embed-gcc/riscv-none-embed/lib/rv32eac/ilp32e'
# This file might be loaded when there is no current objfile.  This
# can happen if the user loads it manually.  In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path.  We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.
    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'
    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]
    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)
    # Call a function as a plain import would not execute body of the included file
    # on repeated reloads of this object file.
    from libstdcxx.v6 import register_libstdcxx_printers
    register_libstdcxx_printers(gdb.current_objfile())
| 41.645161 | 136 | 0.725794 |
ace3fdcafd24decbee4d3e758249b1792b577f13 | 5,298 | py | Python | setup.py | philippjfr/idom | c638ccacfa7ffcfeb3e1eb4f264ae58526d5cd16 | [
"MIT"
] | null | null | null | setup.py | philippjfr/idom | c638ccacfa7ffcfeb3e1eb4f264ae58526d5cd16 | [
"MIT"
] | null | null | null | setup.py | philippjfr/idom | c638ccacfa7ffcfeb3e1eb4f264ae58526d5cd16 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import pipes
import shutil
import subprocess
import sys
import traceback
from distutils import log
from distutils.command.build import build # type: ignore
from distutils.command.sdist import sdist # type: ignore
from setuptools import find_packages, setup
from setuptools.command.develop import develop
# On Windows reuse the subprocess implementation; elsewhere emulate it by
# shell-quoting each argument for display purposes.
# NOTE(review): ``pipes`` is deprecated (removed in Python 3.13);
# ``shlex.quote`` is the modern equivalent.
if sys.platform == "win32":
    from subprocess import list2cmdline
else:
    def list2cmdline(cmd_list):
        # Quote each element so the logged command can be copy-pasted.
        return " ".join(map(pipes.quote, cmd_list))
# the name of the project
name = "idom"
# basic paths used to gather files
here = os.path.abspath(os.path.dirname(__file__))
root = os.path.join(here, name)
# -----------------------------------------------------------------------------
# Package Definition
# -----------------------------------------------------------------------------
# Keyword arguments for setuptools.setup(); the version is derived from git
# tags via setuptools_scm ("use_scm_version").
package = {
    "name": name,
    "python_requires": ">=3.7",
    "packages": find_packages(exclude=["tests*"]),
    "description": "Control the web with Python",
    "author": "Ryan Morshead",
    "author_email": "ryan.morshead@gmail.com",
    "url": "https://github.com/rmorshea/idom",
    "license": "MIT",
    "platforms": "Linux, Mac OS X, Windows",
    "keywords": ["interactive", "widgets", "DOM", "React"],
    "include_package_data": True,
    "zip_safe": False,
    "setup_requires": ["setuptools_scm"],
    "use_scm_version": True,
    "classifiers": [
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Multimedia :: Graphics",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
}
# -----------------------------------------------------------------------------
# CLI Entrypoints
# -----------------------------------------------------------------------------
package["entry_points"] = {
    "console_scripts": ["idom = idom.__main__:main"],
}
# -----------------------------------------------------------------------------
# Requirements
# -----------------------------------------------------------------------------
# Core dependencies: every non-comment line of requirements/pkg-deps.txt.
requirements = []
with open(os.path.join(here, "requirements", "pkg-deps.txt"), "r") as f:
    for line in map(str.strip, f):
        if not line.startswith("#"):
            requirements.append(line)
package["install_requires"] = requirements
# Optional extras: pkg-extras.txt groups requirements under
# "# extra=<name>[,<name>...]" headers; "all" aggregates everything.
_current_extras = []
extra_requirements = {"all": []}  # type: ignore
extra_requirements_path = os.path.join(here, "requirements", "pkg-extras.txt")
with open(extra_requirements_path, "r") as f:
    for line in map(str.strip, f):
        if line.startswith("#") and line[1:].strip().startswith("extra="):
            # Start a new extras group (or several, comma-separated).
            _current_extras = [e.strip() for e in line.split("=", 1)[1].split(",")]
            if "all" in _current_extras:
                raise ValueError("%r uses the reserved extra name 'all'")
            for e in _current_extras:
                extra_requirements[e] = []
        elif _current_extras:
            for e in _current_extras:
                extra_requirements[e].append(line)
                extra_requirements["all"].append(line)
        elif line:
            # A requirement before any header is a file-format error.
            msg = "No '# extra=<name>' header before requirements in %r"
            raise ValueError(msg % extra_requirements_path)
package["extras_require"] = extra_requirements
# -----------------------------------------------------------------------------
# Library Description
# -----------------------------------------------------------------------------
# Use the repository README as the PyPI long description.
with open(os.path.join(here, "README.md")) as f:
    long_description = f.read()
package["long_description"] = long_description
package["long_description_content_type"] = "text/markdown"
# ----------------------------------------------------------------------------
# Build Javascript
# ----------------------------------------------------------------------------
def build_javascript_first(cls):
    """Return a subclass of *cls* whose ``run`` builds the client JS first.

    The wrapper runs ``npm install`` and ``npm run build`` inside the
    ``client/app`` directory, logging each command, before delegating to
    the wrapped command's own ``run``.
    """
    class Command(cls):
        def run(self):
            log.info("Installing Javascript...")
            try:
                js_dir = os.path.join(root, "client", "app")
                for step in ("npm install", "npm run build"):
                    program, *extra = step.split()
                    resolved = shutil.which(program)
                    if resolved is None:
                        raise RuntimeError(
                            f"Failed to run command - {program!r} is not installed."
                        )
                    full_cmd = [resolved] + extra
                    log.info(f"> {list2cmdline(full_cmd)}")
                    subprocess.check_call(full_cmd, cwd=js_dir)
            except Exception:
                log.error("Failed to install Javascript")
                log.error(traceback.format_exc())
                raise
            else:
                log.info("Successfully installed Javascript")
            super().run()
    return Command
# Hook the JS build into every distribution/installation command.
package["cmdclass"] = {
    "sdist": build_javascript_first(sdist),
    "build": build_javascript_first(build),
    "develop": build_javascript_first(develop),
}
# -----------------------------------------------------------------------------
# Install It
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    setup(**package)
| 32.503067 | 83 | 0.503964 |
ace3ff26382d5190132f25ac28e67d20c4f98830 | 1,110 | py | Python | ivy/neural_net_stateful/activations.py | ashok-arjun/ivy | 274c03f667cda10e09b75e90e7a3a46b358e0358 | [
"Apache-2.0"
] | 161 | 2021-01-20T22:11:13.000Z | 2022-01-09T09:46:33.000Z | ivy/neural_net_stateful/activations.py | ashok-arjun/ivy | 274c03f667cda10e09b75e90e7a3a46b358e0358 | [
"Apache-2.0"
] | 4 | 2021-11-10T17:04:36.000Z | 2021-11-26T06:40:43.000Z | ivy/neural_net_stateful/activations.py | ashok-arjun/ivy | 274c03f667cda10e09b75e90e7a3a46b358e0358 | [
"Apache-2.0"
] | 8 | 2021-02-17T20:56:33.000Z | 2022-01-09T16:45:40.000Z | """
Collection of Ivy neural network activations as stateful classes.
"""
# local
import ivy
from ivy.neural_net_stateful.module import Module
class GELU(Module):
    """Stateful GELU activation layer."""
    def __init__(self):
        """Create the module; all the work happens in ``_forward``."""
        Module.__init__(self)
    def _forward(self, inputs):
        """Run the GELU activation element-wise.

        :param inputs: Inputs to process *[batch_shape, d]*.
        :type inputs: array
        :return: The activated outputs *[batch_shape, d]*.
        """
        return ivy.gelu(inputs)
class GEGLU(Module):
    """Stateful GEGLU (gated GELU) activation layer."""
    def __init__(self):
        """Create the module; all the work happens in ``_forward``."""
        Module.__init__(self)
    def _forward(self, inputs):
        """Run the GEGLU activation.

        The trailing dimension is split in half: the first half carries
        the values, the second half the gates.

        :param inputs: Inputs to process *[batch_shape, 2d]*.
        :type inputs: array
        :return: The activated outputs *[batch_shape, d]*.
        """
        value_half, gate_half = ivy.split(inputs, 2, -1)
        return ivy.gelu(gate_half) * value_half
| 23.617021 | 78 | 0.602703 |
ace3ff48f2108740a178064e13a03b1bdfa42506 | 10,815 | py | Python | src/engine/SCons/Scanner/ProgTests.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | src/engine/SCons/Scanner/ProgTests.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | src/engine/SCons/Scanner/ProgTests.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import sys
import unittest
import TestCmd
import TestUnit
import SCons.Node.FS
import SCons.Scanner.Prog
import SCons.Subst
# Build a scratch workspace populated with dummy library files (one byte
# each) for the program scanner to discover.
test = TestCmd.TestCmd(workdir = '')
test.subdir('d1', ['d1', 'd2'], 'dir', ['dir', 'sub'])
libs = [ 'l1.lib', 'd1/l2.lib', 'd1/d2/l3.lib',
         'dir/libfoo.a', 'dir/sub/libbar.a', 'dir/libxyz.other']
for h in libs:
    test.write(h, "\n")
# define some helpers:
class DummyEnvironment(object):
    """Minimal construction-environment stand-in for the scanner tests."""
    def __init__(self, **kw):
        # Always provide LIBSUFFIXES; tests override/extend via keywords.
        self._dict = {'LIBSUFFIXES' : '.lib'}
        self._dict.update(kw)
        self.fs = SCons.Node.FS.FS(test.workpath(''))
    def Dictionary(self, *args):
        # No args: whole dict; one arg: that value; several: list of values.
        if not args:
            return self._dict
        elif len(args) == 1:
            return self._dict[args[0]]
        else:
            return [self._dict[x] for x in args]
    def has_key(self, key):
        return key in self.Dictionary()
    def __getitem__(self,key):
        return self.Dictionary()[key]
    def __setitem__(self,key,value):
        self.Dictionary()[key] = value
    def __delitem__(self,key):
        del self.Dictionary()[key]
    def subst(self, s, target=None, source=None, conv=None):
        # Delegate to the real substitution engine with our dict as scope.
        return SCons.Subst.scons_subst(s, self, gvars=self._dict, lvars=self._dict)
    def subst_path(self, path, target=None, source=None, conv=None):
        if not isinstance(path, list):
            path = [path]
        return list(map(self.subst, path))
    def get_factory(self, factory):
        return factory or self.fs.File
    def Dir(self, filename):
        return self.fs.Dir(test.workpath(filename))
    def File(self, filename):
        return self.fs.File(test.workpath(filename))
class DummyNode(object):
    """Node stand-in: carries a printable name and always claims to exist."""
    def __init__(self, name):
        self.name = name
    def rexists(self):
        # The scanner only needs a truthy "exists in repository" answer.
        return 1
    def __str__(self):
        return self.name
def deps_match(deps, libs):
    """Return whether *deps* and *libs* name the same files, order ignored.

    Both sides are compared as normalized path strings.  Note that, as in
    the original helper, *libs* is sorted in place.
    """
    libs.sort()
    dep_names = sorted(str(d) for d in deps)
    normalized_deps = [os.path.normpath(name) for name in dep_names]
    normalized_libs = [os.path.normpath(name) for name in libs]
    return normalized_deps == normalized_libs
# define some tests:
class ProgramScannerTestCase1(unittest.TestCase):
    def runTest(self):
        """LIBS may be a list of names, a single string, or Node objects."""
        # LIBS given as a list of bare library names.
        env = DummyEnvironment(LIBPATH=[test.workpath("")],
                               LIBS=['l1', 'l2', 'l3'])
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        assert deps_match(found, ['l1.lib']), [str(d) for d in found]
        # LIBS given as a single string.
        env = DummyEnvironment(LIBPATH=[test.workpath("")],
                               LIBS='l1')
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        assert deps_match(found, ['l1.lib']), [str(d) for d in found]
        # LIBS given as a list containing a Node: passed through unchanged.
        f1 = env.fs.File(test.workpath('f1'))
        env = DummyEnvironment(LIBPATH=[test.workpath("")],
                               LIBS=[f1])
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        assert found[0] is f1, found
        # LIBS given as a bare Node.
        f2 = env.fs.File(test.workpath('f1'))
        env = DummyEnvironment(LIBPATH=[test.workpath("")],
                               LIBS=f2)
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        assert found[0] is f2, found
class ProgramScannerTestCase2(unittest.TestCase):
    def runTest(self):
        """Libraries spread over several LIBPATH directories are all found."""
        search_dirs = list(map(test.workpath, ["", "d1", "d1/d2"]))
        env = DummyEnvironment(LIBPATH=search_dirs,
                               LIBS=['l1', 'l2', 'l3'])
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        expected = ['l1.lib', 'd1/l2.lib', 'd1/d2/l3.lib']
        assert deps_match(found, expected), [str(d) for d in found]
class ProgramScannerTestCase3(unittest.TestCase):
    def runTest(self):
        """Each library resolves in the first LIBPATH entry containing it."""
        env = DummyEnvironment(LIBPATH=[test.workpath("d1/d2"),
                                        test.workpath("d1")],
                               LIBS='l2 l3'.split())
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        expected = ['d1/l2.lib', 'd1/d2/l3.lib']
        assert deps_match(found, expected), [str(d) for d in found]
class ProgramScannerTestCase5(unittest.TestCase):
    def runTest(self):
        # Verify that LIBPATH entries are run through env.subst() before
        # being searched, by overriding subst() to expand only "$blah".
        class SubstEnvironment(DummyEnvironment):
            def subst(self, arg, target=None, source=None, conv=None, path=test.workpath("d1")):
                if arg == "$blah":
                    return test.workpath("d1")
                else:
                    return arg
        env = SubstEnvironment(LIBPATH=[ "$blah" ],
                               LIBS='l2 l3'.split())
        s = SCons.Scanner.Prog.ProgramScanner()
        path = s.path(env)
        deps = s(DummyNode('dummy'), env, path)
        # Only l2 lives under d1; l3 is in d1/d2 and must not be found.
        assert deps_match(deps, [ 'd1/l2.lib' ]), list(map(str, deps))
class ProgramScannerTestCase6(unittest.TestCase):
    def runTest(self):
        """LIBPREFIXES/LIBSUFFIXES apply; explicit prefixes/suffixes win."""
        env = DummyEnvironment(LIBPATH=[test.workpath("dir")],
                               LIBS=['foo', 'sub/libbar', 'xyz.other'],
                               LIBPREFIXES=['lib'],
                               LIBSUFFIXES=['.a'])
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        expected = ['dir/libfoo.a', 'dir/sub/libbar.a', 'dir/libxyz.other']
        assert deps_match(found, expected), [str(d) for d in found]
class ProgramScannerTestCase7(unittest.TestCase):
    def runTest(self):
        """Construction variables inside LIBS are substituted before lookup."""
        env = DummyEnvironment(LIBPATH=[test.workpath("dir")],
                               LIBS=['foo', '$LIBBAR', '$XYZ'],
                               LIBPREFIXES=['lib'],
                               LIBSUFFIXES=['.a'],
                               LIBBAR='sub/libbar',
                               XYZ='xyz.other')
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        expected = ['dir/libfoo.a', 'dir/sub/libbar.a', 'dir/libxyz.other']
        assert deps_match(found, expected), [str(d) for d in found]
class ProgramScannerTestCase8(unittest.TestCase):
    def runTest(self):
        """Node objects in LIBS (even nested in sub-lists) pass through
        untouched, regardless of LIBPREFIXES/LIBSUFFIXES."""
        n1 = DummyNode('n1')
        env = DummyEnvironment(LIBPATH=[test.workpath("dir")],
                               LIBS=[n1],
                               LIBPREFIXES=['p1-', 'p2-'],
                               LIBSUFFIXES=['.1', '2'])
        scanner = SCons.Scanner.Prog.ProgramScanner(node_class = DummyNode)
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        assert found == [n1], found
        # Nodes nested inside a sub-list are flattened out.
        n2 = DummyNode('n2')
        env = DummyEnvironment(LIBPATH=[test.workpath("dir")],
                               LIBS=[n1, [n2]],
                               LIBPREFIXES=['p1-', 'p2-'],
                               LIBSUFFIXES=['.1', '2'])
        scanner = SCons.Scanner.Prog.ProgramScanner(node_class = DummyNode)
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        assert found == [n1, n2], found
class ProgramScannerTestCase9(unittest.TestCase):
    def runTest(self):
        """A substituted variable may itself expand to a list of libraries."""
        env = DummyEnvironment(LIBPATH=[test.workpath("dir")],
                               LIBS=['foo', '$LIBBAR'],
                               LIBPREFIXES=['lib'],
                               LIBSUFFIXES=['.a'],
                               LIBBAR=['sub/libbar', 'xyz.other'])
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        expected = ['dir/libfoo.a', 'dir/sub/libbar.a', 'dir/libxyz.other']
        assert deps_match(found, expected), [str(d) for d in found]
class ProgramScannerTestCase10(unittest.TestCase):
    def runTest(self):
        """Substitution is recursive: $LIBBAR expands to text containing
        $LIBBAR2, which is expanded in turn."""
        env = DummyEnvironment(LIBPATH=[test.workpath("dir")],
                               LIBS=['foo', '$LIBBAR'],
                               LIBPREFIXES=['lib'],
                               LIBSUFFIXES=['.a'],
                               LIBBAR='sub/libbar $LIBBAR2',
                               LIBBAR2=['xyz.other'])
        scanner = SCons.Scanner.Prog.ProgramScanner()
        found = scanner(DummyNode('dummy'), env, scanner.path(env))
        expected = ['dir/libfoo.a', 'dir/sub/libbar.a', 'dir/libxyz.other']
        assert deps_match(found, expected), [str(d) for d in found]
def suite():
    """Assemble and return the TestSuite of ProgramScanner test cases.

    The Unicode-LIBS case (TestCase4) is only meaningful on Python 2,
    where ``unicode`` exists as a separate type; it is kept in a string
    and compiled lazily so this module stays importable on Python 3.
    """
    suite = unittest.TestSuite()
    suite.addTest(ProgramScannerTestCase1())
    suite.addTest(ProgramScannerTestCase2())
    suite.addTest(ProgramScannerTestCase3())
    suite.addTest(ProgramScannerTestCase5())
    suite.addTest(ProgramScannerTestCase6())
    suite.addTest(ProgramScannerTestCase7())
    suite.addTest(ProgramScannerTestCase8())
    suite.addTest(ProgramScannerTestCase9())
    suite.addTest(ProgramScannerTestCase10())
    try: unicode
    except NameError: pass
    else:
        code = """if 1:
            class ProgramScannerTestCase4(unittest.TestCase):
                def runTest(self):
                    env = DummyEnvironment(LIBPATH=[test.workpath("d1/d2"),
                                                    test.workpath("d1")],
                                           LIBS=u'l2 l3'.split())
                    s = SCons.Scanner.Prog.ProgramScanner()
                    path = s.path(env)
                    deps = s(DummyNode('dummy'), env, path)
                    assert deps_match(deps, ['d1/l2.lib', 'd1/d2/l3.lib']), map(str, deps)
            suite.addTest(ProgramScannerTestCase4())
            \n"""
        # The function form exec(code) is valid syntax on both Python 2
        # and Python 3; the bare statement ``exec code`` is a SyntaxError
        # under Python 3 and prevented this module from even importing.
        exec(code)
    return suite
# Allow running this module directly as a standalone test program.
if __name__ == "__main__":
    TestUnit.run(suite())
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 38.080986 | 111 | 0.565418 |
ace4009f3ef762f3027097950bc7887d9daa2b93 | 33,605 | py | Python | src/sage/combinat/rooted_tree.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/combinat/rooted_tree.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/combinat/rooted_tree.py | dimpase/sage | 468f23815ade42a2192b0a9cd378de8fdc594dcd | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | r"""
Rooted (Unordered) Trees
AUTHORS:
- Florent Hivert (2011): initial version
"""
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.sets_cat import Sets
from sage.combinat.abstract_tree import (AbstractClonableTree,
AbstractLabelledClonableTree)
from sage.misc.cachefunc import cached_function, cached_method
from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass
from sage.misc.lazy_attribute import lazy_attribute, lazy_class_attribute
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
from sage.sets.disjoint_union_enumerated_sets import DisjointUnionEnumeratedSets
from sage.sets.family import Family
from sage.sets.non_negative_integers import NonNegativeIntegers
from sage.structure.list_clone import NormalizedClonableList
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
@cached_function
def number_of_rooted_trees(n):
    r"""
    Return the number of rooted trees with `n` nodes.
    Compute the number `a(n)` of rooted trees with `n` nodes using the
    recursive formula ([SL000081]_):
    .. MATH::
        a(n+1) = \frac{1}{n} \sum_{k=1}^{n} \left( \sum_{d|k} d a(d) \right) a(n-k+1)
    EXAMPLES::
        sage: from sage.combinat.rooted_tree import number_of_rooted_trees
        sage: [number_of_rooted_trees(i) for i in range(10)]
        [0, 1, 1, 2, 4, 9, 20, 48, 115, 286]
    REFERENCES:
    .. [SL000081] Sloane's :oeis:`A000081`
    """
    if n == 0:
        return Integer(0)
    if n == 1:
        return Integer(1)
    n = Integer(n)
    # Recursive convolution over k; @cached_function memoizes each smaller
    # value, so every a(k) is computed only once.  The division by (n - 1)
    # is exact, hence the integer floor-division is safe.
    return sum(sum(d * number_of_rooted_trees(d) for d in k.divisors()) *
               number_of_rooted_trees(n - k)
               for k in ZZ.range(1, n)) // (n - 1)
class RootedTree(AbstractClonableTree, NormalizedClonableList,
                 metaclass=InheritComparisonClasscallMetaclass):
    r"""
    The class for unordered rooted trees.
    The *unordered rooted trees* are an inductive datatype defined
    as follows: An unordered rooted tree is a multiset of
    unordered rooted trees. The trees that belong to this
    multiset are said to be the *children* of the tree. The tree
    that has no children is called a *leaf*.
    The *labelled rooted trees* (:class:`LabelledRootedTree`)
    form a subclass of this class; they carry additional data.
    One can create a tree from any list (or more generally iterable)
    of trees or objects convertible to a tree.
    EXAMPLES::
        sage: RootedTree([])
        []
        sage: RootedTree([[], [[]]])
        [[], [[]]]
        sage: RootedTree([[[]], []])
        [[], [[]]]
        sage: O = OrderedTree([[[]], []]); O
        [[[]], []]
        sage: RootedTree(O)  # this is O with the ordering forgotten
        [[], [[]]]
    One can also enter any small rooted tree ("small" meaning that
    no vertex has more than `15` children) by using a simple
    numerical encoding of rooted trees, namely, the
    :func:`~sage.combinat.abstract_tree.from_hexacode` function.
    (This function actually parametrizes ordered trees, and here
    we make it parametrize unordered trees by forgetting the
    ordering.) ::
        sage: from sage.combinat.abstract_tree import from_hexacode
        sage: RT = RootedTrees()
        sage: from_hexacode('32001010', RT)
        [[[]], [[]], [[], []]]
    .. NOTE::
        Unlike an ordered tree, an (unordered) rooted tree is a
        multiset (rather than a list) of children. That is, two
        ordered trees which differ from each other by switching
        the order of children are equal to each other as (unordered)
        rooted trees. Internally, rooted trees are encoded as
        :class:`sage.structure.list_clone.NormalizedClonableList`
        instances, and instead of storing their children as an
        actual multiset, they store their children as a list which
        is sorted according to their :meth:`sort_key` value. This
        is as good as storing them as multisets, since the
        :meth:`sort_key` values are sortable and distinguish
        different (unordered) trees. However, if you wish to define
        a subclass of :class:`RootedTree` which implements rooted
        trees with extra structure (say, a class of edge-colored
        rooted trees, or a class of rooted trees with a cyclic
        order on the list of children), then the inherited
        :meth:`sort_key` method will no longer distinguish different
        trees (and, as a consequence, equal trees will be regarded
        as distinct). Thus, you will have to override the method by
        one that does distinguish different trees.
    """
    # Standard auto-parent trick
    @staticmethod
    def __classcall_private__(cls, *args, **opts):
        """
        Ensure that rooted trees created by the enumerated sets and directly
        are the same and that they are instances of :class:`RootedTree`.
        TESTS::
            sage: from sage.combinat.rooted_tree import (RootedTrees_all,
            ....:    RootedTrees_size)
            sage: issubclass(RootedTrees_all().element_class, RootedTree)
            True
            sage: issubclass(RootedTrees_size(3).element_class, RootedTree)
            True
            sage: t0 = RootedTree([[],[[]]])
            sage: t0.parent()
            Rooted trees
            sage: type(t0)
            <class 'sage.combinat.rooted_tree.RootedTrees_all_with_category.element_class'>
            sage: t1 = RootedTrees()([[],[[]]])
            sage: t1.parent() is t0.parent()
            True
            sage: type(t1) is type(t0)
            True
            sage: t1 = RootedTrees(4)([[],[[]]])
            sage: t1.parent() is t0.parent()
            True
            sage: type(t1) is type(t0)
            True
        """
        return cls._auto_parent.element_class(cls._auto_parent, *args, **opts)
    @lazy_class_attribute
    def _auto_parent(cls):
        """
        The automatic parent of the elements of this class.
        When calling the constructor of an element of this class, one needs a
        parent. This class attribute specifies which parent is used.
        EXAMPLES::
            sage: RootedTree._auto_parent
            Rooted trees
            sage: RootedTree([]).parent()
            Rooted trees
        """
        return RootedTrees_all()
    def __init__(self, parent=None, children=[], check=True):
        """
        TESTS::
            sage: RT4 = RootedTrees(4)
            sage: t1 = RT4([[],[[]]])
            sage: TestSuite(t1).run()
        Some bad inputs are refused::
            sage: RT4(69)
            Traceback (most recent call last):
            ...
            TypeError: input (69) is not a valid tree
        """
        # NOTE: the mutable default ``children=[]`` is harmless here
        # because it is only read; ``list(children)`` below always makes
        # a fresh copy before anything is modified.
        try:
            children = list(children)
        except TypeError:
            raise TypeError("input ({}) is not a valid tree".format(children))
        # Always re-wrap the children as elements of this class with this
        # parent, regardless of what was passed in.
        children = [self.__class__(parent, x) for x in children]
        NormalizedClonableList.__init__(self, parent, children, check=check)
    def sort_key(self):
        """
        Return a tuple of nonnegative integers encoding the rooted
        tree ``self``.
        The first entry of the tuple is the number of children of the
        root. Then the rest of the tuple is obtained as follows: List
        the tuples corresponding to all children (we are regarding the
        children themselves as trees). Order this list (not the
        tuples!) in lexicographically increasing order, and flatten
        it into a single tuple.
        This tuple characterizes the rooted tree uniquely, and can be
        used to sort the rooted trees.
        .. NOTE::
            The tree ``self`` must be normalized before calling this
            method (see :meth:`normalize`). This doesn't matter
            unless you are inside the :meth:`clone` context manager,
            because outside of it every rooted tree is already
            normalized.
        .. NOTE::
            By default, this method does not encode any extra
            structure that ``self`` might have. If you have a subclass
            inheriting from :class:`RootedTree` which allows for some
            extra structure, you need to override :meth:`sort_key` in
            order to preserve this structure (for example, the
            :class:`LabelledRootedTree` class does this in
            :meth:`LabelledRootedTree.sort_key`). See the note in the
            docstring of
            :meth:`sage.combinat.ordered_tree.OrderedTree.sort_key`
            for a pitfall.
        EXAMPLES::
            sage: RT = RootedTree
            sage: RT([[],[[]]]).sort_key()
            (2, 0, 1, 0)
            sage: RT([[[]],[]]).sort_key()
            (2, 0, 1, 0)
        """
        l = len(self)
        if l == 0:
            return (0,)
        # Children are already sorted by normalize(), so concatenating
        # their keys yields a canonical encoding.
        resu = [l] + [u for t in self for u in t.sort_key()]
        return tuple(resu)
    def __hash__(self):
        """
        Return a hash for ``self``.
        This is based on :meth:`sort_key`.
        EXAMPLES::
            sage: RT = RootedTree
            sage: hash(RT([[],[[]]])) == hash((2, 0, 1, 0))  # indirect doctest
            True
        """
        return hash(self.sort_key())
    def normalize(self):
        r"""
        Normalize ``self``.
        This function is at the core of the implementation of rooted
        (unordered) trees. The underlying structure is provided by
        ordered rooted trees. Every rooted tree is represented by a
        normalized element in the set of its planar embeddings.
        There should be no need to call ``normalize`` directly as it
        is called automatically upon creation and cloning or
        modification (by ``NormalizedClonableList``).
        The normalization has a recursive definition. It means first
        that every sub-tree is itself normalized, and also that
        sub-trees are sorted. Here the sort is performed according to
        the values of the :meth:`sort_key` method.
        EXAMPLES::
            sage: RT = RootedTree
            sage: RT([[],[[]]]) == RT([[[]],[]])  # indirect doctest
            True
            sage: rt1 = RT([[],[[]]])
            sage: rt2 = RT([[[]],[]])
            sage: rt1 is rt2
            False
            sage: rt1 == rt2
            True
            sage: rt1._get_list() == rt2._get_list()
            True
        """
        self._require_mutable()
        # Subtrees must already be normalized (hence immutable) so that
        # their sort_key() values are stable.
        for st in self:
            assert st.is_immutable(), "Subtree {} is not normalized".format(st)
        self._get_list().sort(key=lambda t: t.sort_key())
        # ensure unique representation
        self.set_immutable()
    def is_empty(self):
        r"""
        Return if ``self`` is the empty tree.
        For rooted trees, this always returns ``False``.
        .. NOTE::
            This is not the same as ``bool(t)``, which returns whether
            ``t`` has some child or not.
        EXAMPLES::
            sage: t = RootedTrees(4)([[],[[]]])
            sage: t.is_empty()
            False
            sage: bool(t)
            True
            sage: t = RootedTrees(1)([])
            sage: t.is_empty()
            False
            sage: bool(t)
            False
        """
        return False
    def graft_list(self, other):
        """
        Return the list of trees obtained by grafting ``other`` on ``self``.
        Here grafting means that one takes the disjoint union of
        ``self`` and ``other``, chooses a node of ``self``,
        and adds the root of ``other`` to the list of children of
        this node. The root of the resulting tree is the root of
        ``self``. (This can be done for each node of ``self``;
        this method returns the list of all results.)
        This is useful for free pre-Lie algebras.
        EXAMPLES::
            sage: RT = RootedTree
            sage: x = RT([])
            sage: y = RT([x, x])
            sage: x.graft_list(x)
            [[[]]]
            sage: l = y.graft_list(x); l
            [[[], [], []], [[], [[]]], [[], [[]]]]
            sage: [parent(i) for i in l]
            [Rooted trees, Rooted trees, Rooted trees]
        TESTS::
            sage: x = RootedTrees(1)([])
            sage: y = RootedTrees(3)([x, x])
            sage: l = y.graft_list(x); l
            [[[], [], []], [[], [[]]], [[], [[]]]]
            sage: [parent(i) for i in l]
            [Rooted trees, Rooted trees, Rooted trees]
            sage: x = RootedTree([[[], []], []])
            sage: y = RootedTree([[], []])
            sage: len(set(x.graft_list(y)))
            4
        """
        resu = []
        # Grafting ``other`` on the root:
        with self.clone() as t:
            t.append(other)
        resu += [t]
        for i, sub in enumerate(self):
            # Grafting ``other`` on a descendant of the
            # ``i``-th child:
            for new_sub in sub.graft_list(other):
                with self.clone() as t:
                    t[i] = new_sub
                resu += [t]
        return resu
    def graft_on_root(self, other):
        """
        Return the tree obtained by grafting ``other`` on the root of ``self``.
        Here grafting means that one takes the disjoint union of
        ``self`` and ``other``, and adds the root of ``other`` to
        the list of children of ``self``. The root of the resulting
        tree is the root of ``self``.
        This is useful for free Nap algebras.
        EXAMPLES::
            sage: RT = RootedTree
            sage: x = RT([])
            sage: y = RT([x, x])
            sage: x.graft_on_root(x)
            [[]]
            sage: y.graft_on_root(x)
            [[], [], []]
            sage: x.graft_on_root(y)
            [[[], []]]
        """
        with self.clone() as t:
            t.append(other)
        return t
    def single_graft(self, x, grafting_function, path_prefix=()):
        r"""
        Graft subtrees of `x` on ``self`` using the given function.
        Let `x_1, x_2, \ldots, x_p` be the children of the root of
        `x`. For each `i`, the subtree of `x` comprising all
        descendants of `x_i` is joined by a new edge to
        the vertex of ``self`` specified by the `i`-th path in the
        grafting function (i.e., by the path
        ``grafting_function[i]``).
        The number of vertices of the result is the sum of the numbers
        of vertices of ``self`` and `x` minus one, because the root of
        `x` is not used.
        This is used to define the product of the Grossman-Larson algebras.
        INPUT:
        - `x` -- a rooted tree
        - ``grafting_function`` -- a list of paths in ``self``
        - ``path_prefix`` -- optional tuple (default ``()``)
        The ``path_prefix`` argument is only used for internal recursion.
        EXAMPLES::
            sage: LT = LabelledRootedTrees()
            sage: y = LT([LT([],label='b')], label='a')
            sage: x = LT([LT([],label='d')], label='c')
            sage: y.single_graft(x,[(0,)])
            a[b[d[]]]
            sage: t = LT([LT([],label='b'),LT([],label='c')], label='a')
            sage: s = LT([LT([],label='d'),LT([],label='e')], label='f')
            sage: t.single_graft(s,[(0,),(1,)])
            a[b[d[]], c[e[]]]
        """
        P = self.parent()
        # Recurse into each child first; path_prefix tracks the address of
        # the current vertex inside the original ``self``.
        child_grafts = [suby.single_graft(x, grafting_function,
                                          path_prefix + (i,))
                        for i, suby in enumerate(self)]
        # Labelled trees need the label passed along; unlabelled trees
        # have no label() method, hence the AttributeError fallback.
        try:
            y1 = P(child_grafts, label=self.label())
        except AttributeError:
            y1 = P(child_grafts)
        with y1.clone() as y2:
            for k in range(len(x)):
                if grafting_function[k] == path_prefix:
                    y2.append(x[k])
        return y2
class RootedTrees(UniqueRepresentation, Parent):
    """
    Factory class for rooted trees.
    INPUT:
    - ``size`` -- (optional) an integer
    OUTPUT:
    the set of all rooted trees (of the given size ``size`` if
    specified)
    EXAMPLES::
        sage: RootedTrees()
        Rooted trees
        sage: RootedTrees(2)
        Rooted trees with 2 nodes
    """
    @staticmethod
    def __classcall_private__(cls, n=None):
        """
        TESTS::
            sage: from sage.combinat.rooted_tree import (RootedTrees_all,
            ....:    RootedTrees_size)
            sage: RootedTrees(2) is RootedTrees_size(2)
            True
            sage: RootedTrees(5).cardinality()
            9
            sage: RootedTrees() is RootedTrees_all()
            True
        TESTS::
            sage: RootedTrees(0)
            Traceback (most recent call last):
            ...
            ValueError: n must be a positive integer
        """
        # Dispatch to the "all sizes" parent or the fixed-size parent.
        if n is None:
            return RootedTrees_all()
        if n not in ZZ or n < 1:
            raise ValueError("n must be a positive integer")
        return RootedTrees_size(Integer(n))
class RootedTrees_all(DisjointUnionEnumeratedSets, RootedTrees):
    r"""
    Class of all (unordered, unlabelled) rooted trees.
    See :class:`RootedTree` for a definition.
    """
    def __init__(self):
        """
        TESTS::
            sage: sum(x**len(t) for t in
            ....:     set(RootedTree(t) for t in OrderedTrees(6)))
            x^5 + x^4 + 3*x^3 + 6*x^2 + 9*x
            sage: sum(x**len(t) for t in RootedTrees(6))
            x^5 + x^4 + 3*x^3 + 6*x^2 + 9*x
            sage: TestSuite(RootedTrees()).run() # long time
        """
        # Union over n >= 0 of the fixed-size sets; ``keepkey=False``
        # makes iteration yield bare trees rather than (size, tree) pairs.
        DisjointUnionEnumeratedSets.__init__(
            self, Family(NonNegativeIntegers(), RootedTrees_size),
            facade=True, keepkey=False)
    def _repr_(self):
        r"""
        TESTS::
            sage: RootedTrees()
            Rooted trees
        """
        return "Rooted trees"
    def __contains__(self, x):
        """
        TESTS::
            sage: S = RootedTrees()
            sage: 1 in S
            False
            sage: S([]) in S
            True
        """
        return isinstance(x, self.element_class)
    def unlabelled_trees(self):
        """
        Return the set of unlabelled trees associated to ``self``.
        EXAMPLES::
            sage: RootedTrees().unlabelled_trees()
            Rooted trees
        """
        return self
    def labelled_trees(self):
        """
        Return the set of labelled trees associated to ``self``.
        EXAMPLES::
            sage: RootedTrees().labelled_trees()
            Labelled rooted trees
        As a consequence::
            sage: lb = RootedTrees()([[],[[], []]]).canonical_labelling()
            sage: lb
            1[2[], 3[4[], 5[]]]
            sage: lb.__class__
            <class 'sage.combinat.rooted_tree.LabelledRootedTrees_all_with_category.element_class'>
            sage: lb.parent()
            Labelled rooted trees
        """
        return LabelledRootedTrees()
    def _element_constructor_(self, *args, **keywords):
        """
        EXAMPLES::
            sage: B = RootedTrees()
            sage: B._element_constructor_([])
            []
            sage: B([[],[]])  # indirect doctest
            [[], []]
        """
        return self.element_class(self, *args, **keywords)
    @cached_method
    def leaf(self):
        """
        Return a leaf tree with ``self`` as parent.
        EXAMPLES::
            sage: RootedTrees().leaf()
            []
        """
        return self([])
    Element = RootedTree
class RootedTrees_size(RootedTrees):
    """
    The enumerated set of rooted trees with a given number of nodes.
    The number of nodes of a rooted tree is defined recursively:
    The number of nodes of a rooted tree with `a` children is `a`
    plus the sum of the number of nodes of each of these children.
    TESTS::
        sage: from sage.combinat.rooted_tree import RootedTrees_size
        sage: for i in range(1, 6): TestSuite(RootedTrees_size(i)).run()
    """
    def __init__(self, n):
        """
        TESTS::
            sage: for i in range(1, 6):
            ....:     TestSuite(RootedTrees(i)).run()
        """
        # Python-3 style super(); the file is Python-3 only (it uses the
        # ``metaclass=`` keyword syntax elsewhere).
        super().__init__(category=FiniteEnumeratedSets())
        self._n = n
    def _repr_(self):
        r"""
        TESTS::
            sage: RootedTrees(4)  # indirect doctest
            Rooted trees with 4 nodes
        """
        return "Rooted trees with {} nodes".format(self._n)
    def __contains__(self, x):
        """
        TESTS::
            sage: S = RootedTrees(3)
            sage: 1 in S
            False
            sage: S([[],[]]) in S
            True
        """
        return isinstance(x, self.element_class) and x.node_number() == self._n
    def _an_element_(self):
        """
        TESTS::
            sage: RootedTrees(4).an_element()  # indirect doctest
            [[[[]]]]
        """
        return self.first()
    def __iter__(self):
        """
        An iterator for ``self``.
        This generates the rooted trees of given size. The algorithm
        first picks a partition for the sizes of subtrees, then picks
        appropriate tuples of smaller trees.
        EXAMPLES::
            sage: from sage.combinat.rooted_tree import *
            sage: RootedTrees(1).list()
            [[]]
            sage: RootedTrees(2).list()
            [[[]]]
            sage: RootedTrees(3).list()
            [[[[]]], [[], []]]
            sage: RootedTrees(4).list()
            [[[[[]]]], [[[], []]], [[], [[]]], [[], [], []]]
        """
        if self._n == 1:
            yield self._element_constructor_([])
            return
        from sage.combinat.partition import Partitions
        from itertools import chain, combinations_with_replacement, product
        for part in Partitions(self._n - 1):
            mults = part.to_exp_dict()
            choices = []
            for p, mp in mults.items():
                # All trees with p nodes; pick mp of them with repetition
                # (order inside a multiset of children does not matter).
                lp = self.__class__(p).list()
                new_choice = [list(z) for z in combinations_with_replacement(lp, mp)]
                choices.append(new_choice)
            for c in product(*choices):
                # chain.from_iterable flattens the tuple of lists in
                # linear time (sum(c, []) is quadratic in len(c)).
                yield self.element_class(self._parent_for,
                                         list(chain.from_iterable(c)))
    def check_element(self, el, check=True):
        r"""
        Check that a given tree actually belongs to ``self``.
        This just checks the number of vertices.
        EXAMPLES::
            sage: RT3 = RootedTrees(3)
            sage: RT3([[],[]])  # indirect doctest
            [[], []]
            sage: RT3([[],[],[]])  # indirect doctest
            Traceback (most recent call last):
            ...
            ValueError: wrong number of nodes
        """
        if el.node_number() != self._n:
            raise ValueError("wrong number of nodes")
    def cardinality(self):
        r"""
        Return the cardinality of ``self``.
        EXAMPLES::
            sage: RootedTrees(1).cardinality()
            1
            sage: RootedTrees(3).cardinality()
            2
        """
        return number_of_rooted_trees(self._n)
    @lazy_attribute
    def _parent_for(self):
        """
        The parent of the elements generated by ``self``.
        TESTS::
            sage: S = RootedTrees(3)
            sage: S._parent_for
            Rooted trees
        """
        return RootedTrees_all()
    @lazy_attribute
    def element_class(self):
        """
        TESTS::
            sage: S = RootedTrees(3)
            sage: S.element_class
            <class 'sage.combinat.rooted_tree.RootedTrees_all_with_category.element_class'>
            sage: S.first().__class__ == RootedTrees().first().__class__
            True
        """
        return self._parent_for.element_class
    def _element_constructor_(self, *args, **keywords):
        """
        EXAMPLES::
            sage: S = RootedTrees(2)
            sage: S([])  # indirect doctest
            Traceback (most recent call last):
            ...
            ValueError: wrong number of nodes
            sage: S([[]])  # indirect doctest
            [[]]
            sage: S = RootedTrees(1)  # indirect doctest
            sage: S([])
            []
        """
        res = self.element_class(self._parent_for, *args, **keywords)
        if res.node_number() != self._n:
            raise ValueError("wrong number of nodes")
        return res
class LabelledRootedTree(AbstractLabelledClonableTree, RootedTree):
    """
    Labelled rooted trees.
    A labelled rooted tree is a rooted tree with a label
    attached at each node.
    More formally:
    The *labelled rooted trees* are an inductive datatype defined
    as follows: A labelled rooted tree is a multiset of labelled
    rooted trees, endowed with a label (which can be any object,
    including ``None``). The trees that belong to this multiset
    are said to be the *children* of the tree. (Notice that the
    labels of these children may and may not be of the same type
    as the label of the tree). A labelled rooted tree which has
    no children (so the only information it carries is its label)
    is said to be a *leaf*.
    Every labelled rooted tree gives rise to an unlabelled rooted
    tree (:class:`RootedTree`) by forgetting the labels. (This is
    implemented as a conversion.)
    INPUT:
    - ``children`` -- a list or tuple or more generally any iterable
      of trees or objects convertible to trees
    - ``label`` -- any hashable Sage object (default is ``None``)
    EXAMPLES::
        sage: x = LabelledRootedTree([], label = 3); x
        3[]
        sage: LabelledRootedTree([x, x, x], label = 2)
        2[3[], 3[], 3[]]
        sage: LabelledRootedTree((x, x, x), label = 2)
        2[3[], 3[], 3[]]
        sage: LabelledRootedTree([[],[[], []]], label = 3)
        3[None[], None[None[], None[]]]
    Children are reordered using the value of the :meth:`sort_key` method::
        sage: y = LabelledRootedTree([], label = 5); y
        5[]
        sage: xyy2 = LabelledRootedTree((x, y, y), label = 2); xyy2
        2[3[], 5[], 5[]]
        sage: yxy2 = LabelledRootedTree((y, x, y), label = 2); yxy2
        2[3[], 5[], 5[]]
        sage: xyy2 == yxy2
        True
    Converting labelled into unlabelled rooted trees by
    forgetting the labels, and back (the labels are
    initialized as ``None``)::
        sage: yxy2crude = RootedTree(yxy2); yxy2crude
        [[], [], []]
        sage: LabelledRootedTree(yxy2crude)
        None[None[], None[], None[]]
    TESTS::
        sage: xyy2._get_list() == yxy2._get_list()
        True
    """
    @staticmethod
    def __classcall_private__(cls, *args, **opts):
        """
        Ensure that trees created by the sets and directly are the same and
        that they are instances of :class:`LabelledRootedTree`.
        TESTS::
            sage: issubclass(LabelledRootedTrees().element_class, LabelledRootedTree)
            True
            sage: t0 = LabelledRootedTree([[],[[], []]], label = 3)
            sage: t0.parent()
            Labelled rooted trees
            sage: type(t0)
            <class 'sage.combinat.rooted_tree.LabelledRootedTrees_all_with_category.element_class'>
        """
        return cls._auto_parent.element_class(cls._auto_parent, *args, **opts)
    @lazy_class_attribute
    def _auto_parent(cls):
        """
        The automatic parent of the element of this class.
        When calling the constructor of an element of this class, one needs a
        parent. This class attribute specifies which parent is used.
        EXAMPLES::
            sage: LabelledRootedTree._auto_parent
            Labelled rooted trees
            sage: LabelledRootedTree([], label = 3).parent()
            Labelled rooted trees
        """
        return LabelledRootedTrees()
    def sort_key(self):
        """
        Return a tuple of nonnegative integers encoding the labelled
        rooted tree ``self``.
        The first entry of the tuple is a pair consisting of the
        number of children of the root and the label of the root. Then
        the rest of the tuple is obtained as follows: List
        the tuples corresponding to all children (we are regarding the
        children themselves as trees). Order this list (not the
        tuples!) in lexicographically increasing order, and flatten
        it into a single tuple.
        This tuple characterizes the labelled rooted tree uniquely, and
        can be used to sort the labelled rooted trees provided that the
        labels belong to a type which is totally ordered.
        .. NOTE::
            The tree ``self`` must be normalized before calling this
            method (see :meth:`normalize`). This doesn't matter
            unless you are inside the :meth:`clone` context manager,
            because outside of it every rooted tree is already
            normalized.
        .. NOTE::
            This method overrides :meth:`RootedTree.sort_key`
            and returns a result different from what the latter
            would return, as it wants to encode the whole labelled
            tree including its labelling rather than just the
            unlabelled tree. Therefore, be careful with using this
            method on subclasses of :class:`RootedOrderedTree`;
            under some circumstances they could inherit it from
            another superclass instead of from :class:`RootedTree`,
            which would cause the method to forget the labelling.
            See the docstrings of :meth:`RootedTree.sort_key` and
            :meth:`sage.combinat.ordered_tree.OrderedTree.sort_key`.
        EXAMPLES::
            sage: LRT = LabelledRootedTrees(); LRT
            Labelled rooted trees
            sage: x = LRT([], label = 3); x
            3[]
            sage: x.sort_key()
            ((0, 3),)
            sage: y = LRT([x, x, x], label = 2); y
            2[3[], 3[], 3[]]
            sage: y.sort_key()
            ((3, 2), (0, 3), (0, 3), (0, 3))
            sage: LRT.an_element().sort_key()
            ((3, 'alpha'), (0, 3), (1, 5), (0, None), (2, 42), (0, 3), (0, 3))
            sage: lb = RootedTrees()([[],[[], []]]).canonical_labelling()
            sage: lb.sort_key()
            ((2, 1), (0, 2), (2, 3), (0, 4), (0, 5))
        """
        l = len(self)
        if l == 0:
            return ((0, self.label()),)
        # Same flattening scheme as in RootedTree.sort_key, but each entry
        # carries the node's label alongside its number of children.
        resu = [(l, self.label())] + [u for t in self for u in t.sort_key()]
        return tuple(resu)
    def __hash__(self):
        """
        Return a hash for ``self``.
        EXAMPLES::
            sage: lb = RootedTrees()([[],[[], []]]).canonical_labelling()
            sage: hash(lb) == hash(((2, 1), (0, 2), (2, 3), (0, 4), (0, 5)))  # indirect doctest
            True
        """
        return hash(self.sort_key())
    # The unlabelled counterpart used when labels are forgotten.
    _UnLabelled = RootedTree
class LabelledRootedTrees(UniqueRepresentation, Parent):
    """
    This is a parent stub to serve as a factory class for labelled
    rooted trees.
    EXAMPLES::
        sage: LRT = LabelledRootedTrees(); LRT
        Labelled rooted trees
        sage: x = LRT([], label = 3); x
        3[]
        sage: x.parent() is LRT
        True
        sage: y = LRT([x, x, x], label = 2); y
        2[3[], 3[], 3[]]
        sage: y.parent() is LRT
        True
    .. TODO::
        Add the possibility to restrict the labels to a fixed set.
    """
    @staticmethod
    def __classcall_private__(cls, n=None):
        """
        TESTS::
            sage: from sage.combinat.rooted_tree import LabelledRootedTrees_all
            sage: LabelledRootedTrees_all() == LabelledRootedTrees()
            True
        """
        # The ``n`` argument is accepted for interface symmetry with
        # RootedTrees() but is currently ignored: there is only the
        # "all sizes" parent for labelled trees.
        return LabelledRootedTrees_all()
class LabelledRootedTrees_all(LabelledRootedTrees):
    r"""
    Class of all (unordered) labelled rooted trees.
    See :class:`LabelledRootedTree` for a definition.
    """
    def __init__(self, category=None):
        """
        TESTS::
            sage: TestSuite(LabelledRootedTrees()).run()
        """
        # Labels are arbitrary objects, so this set is always infinite.
        if category is None:
            category = Sets()
        category = category.Infinite()
        Parent.__init__(self, category=category)
    def _repr_(self):
        """
        Return the string representation of ``self``.
        TESTS::
            sage: LabelledRootedTrees()
            Labelled rooted trees
        """
        return "Labelled rooted trees"
    def _an_element_(self):
        """
        Return a labelled tree.
        EXAMPLES::
            sage: LabelledRootedTrees().an_element()  # indirect doctest
            alpha[3[], 5[None[]], 42[3[], 3[]]]
        """
        LT = self._element_constructor_
        t = LT([], label=3)
        t1 = LT([t, t], label=42)
        t2 = LT([[]], label=5)
        return LT([t, t1, t2], label="alpha")
    def unlabelled_trees(self):
        """
        Return the set of unlabelled trees associated to ``self``.
        EXAMPLES::
            sage: LabelledRootedTrees().unlabelled_trees()
            Rooted trees
        """
        return RootedTrees_all()
    def labelled_trees(self):
        """
        Return the set of labelled trees associated to ``self``.
        EXAMPLES::
            sage: LabelledRootedTrees().labelled_trees()
            Labelled rooted trees
        """
        return self
    Element = LabelledRootedTree
| 31.583647 | 99 | 0.557239 |
ace40160758642364cbe72dd995d0b408baa5bc0 | 834 | py | Python | leetcode/bit_operation/78.py | 1lch2/PythonExercise | 9adbe5fc2bce71f4c09ccf83079c44699c27fce4 | [
"MIT"
] | 1 | 2020-08-19T09:26:20.000Z | 2020-08-19T09:26:20.000Z | leetcode/bit_operation/78.py | 1lch2/PythonExercise | 9adbe5fc2bce71f4c09ccf83079c44699c27fce4 | [
"MIT"
] | null | null | null | leetcode/bit_operation/78.py | 1lch2/PythonExercise | 9adbe5fc2bce71f4c09ccf83079c44699c27fce4 | [
"MIT"
] | null | null | null | # 给定一组不含重复元素的整数数组 nums,返回该数组所有可能的子集(幂集)。
# 说明:解集不能包含重复的子集。
# 示例:
# 输入: nums = [1,2,3]
# 输出:
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
from typing import List
# Binary bits solution.
# Reference: http://wuchong.me/blog/2014/07/28/permutation-and-combination-realize/
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return the power set of ``nums`` by enumerating binary bitmasks.

        Mask value ``m`` in ``[0, 2**len(nums))`` selects element ``nums[j]``
        whenever bit ``j`` of ``m`` is set; mask 0 yields the empty subset.
        """
        n = len(nums)
        power_set = []
        for mask in range(1 << n):
            subset = [nums[pos] for pos in range(n) if mask & (1 << pos)]
            power_set.append(subset)
        return power_set
ace40188b6c1612340cfdc0d2354e9e40c7cd6e3 | 77,595 | py | Python | src/device-manager/python/openweave/WeaveDeviceMgr.py | kghost/openweave-core | f9fec15d5969d0c62cfbbe4b318bf15d9a9b2439 | [
"Apache-2.0"
] | 1 | 2020-05-03T22:50:24.000Z | 2020-05-03T22:50:24.000Z | src/device-manager/python/openweave/WeaveDeviceMgr.py | kghost/openweave-core | f9fec15d5969d0c62cfbbe4b318bf15d9a9b2439 | [
"Apache-2.0"
] | null | null | null | src/device-manager/python/openweave/WeaveDeviceMgr.py | kghost/openweave-core | f9fec15d5969d0c62cfbbe4b318bf15d9a9b2439 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2013-2018 Nest Labs, Inc.
# Copyright (c) 2019-2020 Google, LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Python interface for Weave Device Manager
#
"""Weave Device Manager interface
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import sys
import os
import re
import copy
import binascii
import datetime
import time
import glob
import platform
import ast
from threading import Thread, Lock, Event
from ctypes import *
import six
from six.moves import range
from .WeaveUtility import WeaveUtility
from .WeaveStack import *
__all__ = [ 'WeaveDeviceManager', 'NetworkInfo', 'DeviceDescriptor' ]
# --- Network Provisioning: network types ---
NetworkType_WiFi = 1
NetworkType_Thread = 2
# --- WiFi operating modes ---
WiFiMode_AdHoc = 1
WiFiMode_Managed = 2
# --- Role played by the device on the WiFi network ---
WiFiRole_Station = 1
WiFiRole_AccessPoint = 2
# --- WiFi security types ---
WiFiSecurityType_None = 1
WiFiSecurityType_WEP = 2
WiFiSecurityType_WPAPersonal = 3
WiFiSecurityType_WPA2Personal = 4
WiFiSecurityType_WPA2MixedPersonal = 5
WiFiSecurityType_WPAEnterprise = 6
WiFiSecurityType_WPA2Enterprise = 7
WiFiSecurityType_WPA2MixedEnterprise = 8
WiFiSecurityType_WPA3Personal = 9
WiFiSecurityType_WPA3MixedPersonal = 10
WiFiSecurityType_WPA3Enterprise = 11
WiFiSecurityType_WPA3MixedEnterprise = 12
# Sentinel values meaning "not specified" for Thread network parameters.
ThreadPANId_NotSpecified = 0xFFFFFFFF
ThreadChannel_NotSpecified = 0xFF
# --- Rendezvous mode flag bits ---
RendezvousMode_EnableWiFiRendezvousNetwork = 0x0001
RendezvousMode_Enable802154RendezvousNetwork = 0x0002
RendezvousMode_EnableFabricRendezvousAddress = 0x0004
# --- Device-identify targeting wildcards ---
TargetFabricId_AnyFabric = 0xFFFFFFFFFFFFFFFF
TargetFabricId_NotInFabric = 0
TargetDeviceMode_Any = 0x00000000 # Locate all devices regardless of mode.
TargetDeviceMode_UserSelectedMode = 0x00000001 # Locate all devices in 'user-selected' mode -- i.e. where the device has
# has been directly identified by a user, e.g. by pressing a button.
TargetVendorId_Any = 0xFFFF
TargetProductId_Any = 0xFFFF
TargetDeviceId_Any = 0xFFFFFFFFFFFFFFFF
# --- Device feature flag bits ---
DeviceFeature_HomeAlarmLinkCapable = 0x00000001 # Indicates a Nest Protect that supports connection to a home alarm panel
DeviceFeature_LinePowered = 0x00000002 # Indicates a device that requires line power
# Product ids used by the BLE/system test commands, keyed by product name.
SystemTest_ProductList = { 'thermostat' : 0x235A000A,
                           'topaz' : 0x235A0003}
# Device descriptor flag: the ESSID field holds only a suffix, not the full name.
DeviceDescriptorFlag_IsRendezvousWiFiESSIDSuffix = 0x01
class NetworkInfo:
    """In-memory description of a WiFi or Thread network known to a device.

    Every attribute defaults to None, which means "not specified"; callers
    populate only the fields relevant to the network type at hand.
    """

    def __init__(self, networkType=None, networkId=None, wifiSSID=None, wifiMode=None, wifiRole=None,
                 wifiSecurityType=None, wifiKey=None,
                 threadNetworkName=None, threadExtendedPANId=None, threadNetworkKey=None, threadPSKc=None,
                 wirelessSignalStrength=None, threadPANId=None, threadChannel=None):
        self.NetworkType = networkType
        self.NetworkId = networkId
        self.WiFiSSID = wifiSSID
        self.WiFiMode = wifiMode
        self.WiFiRole = wifiRole
        self.WiFiSecurityType = wifiSecurityType
        self.WiFiKey = wifiKey
        self.ThreadNetworkName = threadNetworkName
        self.ThreadExtendedPANId = threadExtendedPANId
        self.ThreadNetworkKey = threadNetworkKey
        self.ThreadPSKc = threadPSKc
        self.ThreadPANId = threadPANId
        self.ThreadChannel = threadChannel
        self.WirelessSignalStrength = wirelessSignalStrength

    def Print(self, prefix=""):
        """Print each populated field on its own line, prefixed by *prefix*."""
        print("%sNetwork Type: %s" % (prefix, NetworkTypeToString(self.NetworkType)))
        if self.NetworkId is not None:
            print("%sNetwork Id: %d" % (prefix, self.NetworkId))
        if self.WiFiSSID is not None:
            print("%sWiFi SSID: \"%s\"" % (prefix, self.WiFiSSID))
        if self.WiFiMode is not None:
            print("%sWiFi Mode: %s" % (prefix, WiFiModeToString(self.WiFiMode)))
        if self.WiFiRole is not None:
            print("%sWiFi Role: %s" % (prefix, WiFiRoleToString(self.WiFiRole)))
        if self.WiFiSecurityType is not None:
            print("%sWiFi Security Type: %s" % (prefix, WiFiSecurityTypeToString(self.WiFiSecurityType)))
        if self.WiFiKey is not None:
            print("%sWiFi Key: %s" % (prefix, self.WiFiKey))
        if self.ThreadNetworkName is not None:
            print("%sThread Network Name: \"%s\"" % (prefix, self.ThreadNetworkName))
        if self.ThreadExtendedPANId is not None:
            print("%sThread Extended PAN Id: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.ThreadExtendedPANId)))
        if self.ThreadNetworkKey is not None:
            print("%sThread Network Key: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.ThreadNetworkKey)))
        if self.ThreadPSKc is not None:
            print("%sThread Network PSKc: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.ThreadPSKc)))
        if self.ThreadPANId is not None:
            print("%sThread PAN Id: %04x" % (prefix, self.ThreadPANId))
        if self.ThreadChannel is not None:
            print("%sThread Channel: %d" % (prefix, self.ThreadChannel))
        if self.WirelessSignalStrength is not None:
            print("%sWireless Signal Strength: %s" % (prefix, self.WirelessSignalStrength))

    def SetField(self, name, val):
        """Assign one field by (case-insensitive) name, accepting several aliases.

        Network type, WiFi mode/role/security use the module Parse* helpers;
        network id is coerced with int(); everything else is stored verbatim.
        Raises Exception for an unrecognized field name.
        """
        key = name.lower()
        if key in ('networktype', 'network-type', 'type'):
            self.NetworkType = ParseNetworkType(val)
        elif key in ('networkid', 'network-id', 'id'):
            self.NetworkId = int(val)
        elif key in ('wifissid', 'wifi-ssid', 'ssid'):
            self.WiFiSSID = val
        elif key in ('wifimode', 'wifi-mode'):
            self.WiFiMode = ParseWiFiMode(val)
        elif key in ('wifirole', 'wifi-role'):
            self.WiFiRole = ParseWiFiRole(val)
        elif key in ('wifisecuritytype', 'wifi-security-type', 'securitytype',
                     'security-type', 'wifi-security', 'security'):
            self.WiFiSecurityType = ParseSecurityType(val)
        elif key in ('wifikey', 'wifi-key', 'key'):
            self.WiFiKey = val
        elif key in ('threadnetworkname', 'thread-network-name', 'thread-name'):
            self.ThreadNetworkName = val
        elif key in ('threadextendedpanid', 'thread-extended-pan-id'):
            self.ThreadExtendedPANId = val
        elif key in ('threadnetworkkey', 'thread-network-key', 'thread-key'):
            self.ThreadNetworkKey = val
        elif key in ('threadpskc', 'thread-pskc', 'pskc'):
            self.ThreadPSKc = val
        elif key in ('threadpanid', 'thread-pan-id', 'pan-id'):
            self.ThreadPANId = val
        elif key in ('threadchannel', 'thread-channel'):
            self.ThreadChannel = val
        elif key in ('wirelesssignalstrength', 'wireless-signal-strength'):
            self.WirelessSignalStrength = val
        else:
            raise Exception("Invalid NetworkInfo field: " + str(key))
class DeviceDescriptor:
    """Decoded contents of a Weave device descriptor.

    Unspecified fields are None.  The ``deviceFeatures`` bitmask given to the
    constructor is expanded into ``DeviceFeatures``, a list of the individual
    single-bit feature values that are set (bits 0..30).
    """

    def __init__(self, deviceId=None, fabricId=None, vendorId=None, productId=None, productRevision=None,
                 manufacturingYear=None, manufacturingMonth=None, manufacturingDay=None,
                 primary802154MACAddress=None, primaryWiFiMACAddress=None,
                 serialNumber=None, softwareVersion=None, rendezvousWiFiESSID=None, pairingCode=None,
                 pairingCompatibilityVersionMajor=None, pairingCompatibilityVersionMinor=None,
                 deviceFeatures=None, flags=None):
        self.DeviceId = deviceId
        self.FabricId = fabricId
        self.VendorId = vendorId
        self.ProductId = productId
        self.ProductRevision = productRevision
        self.ManufacturingYear = manufacturingYear
        self.ManufacturingMonth = manufacturingMonth
        self.ManufacturingDay = manufacturingDay
        self.Primary802154MACAddress = primary802154MACAddress
        self.PrimaryWiFiMACAddress = primaryWiFiMACAddress
        self.SerialNumber = serialNumber
        self.SoftwareVersion = softwareVersion
        self.RendezvousWiFiESSID = rendezvousWiFiESSID
        self.PairingCode = pairingCode
        self.PairingCompatibilityVersionMajor = pairingCompatibilityVersionMajor
        self.PairingCompatibilityVersionMinor = pairingCompatibilityVersionMinor
        # Expand bits 0..30 of the feature mask into a list of set bits.
        # Bit 31 is deliberately excluded, matching the original loop bound.
        if deviceFeatures is not None:
            self.DeviceFeatures = [1 << bit for bit in range(31)
                                   if deviceFeatures & (1 << bit)]
        else:
            self.DeviceFeatures = []
        self.Flags = 0 if flags is None else flags

    def Print(self, prefix=""):
        """Print each populated field on its own line, prefixed by *prefix*."""
        if self.DeviceId is not None:
            print("%sDevice Id: %016X" % (prefix, self.DeviceId))
        if self.FabricId is not None:
            # Bug fix: label previously read "Fabrid Id" (typo).
            print("%sFabric Id: %016X" % (prefix, self.FabricId))
        if self.VendorId is not None:
            print("%sVendor Id: %X" % (prefix, self.VendorId))
        if self.ProductId is not None:
            print("%sProduct Id: %X" % (prefix, self.ProductId))
        if self.ProductRevision is not None:
            print("%sProduct Revision: %X" % (prefix, self.ProductRevision))
        if self.SerialNumber is not None:
            print("%sSerial Number: %s" % (prefix, self.SerialNumber))
        if self.SoftwareVersion is not None:
            print("%sSoftware Version: %s" % (prefix, self.SoftwareVersion))
        if self.ManufacturingYear is not None and self.ManufacturingMonth is not None:
            if self.ManufacturingDay is not None:
                print("%sManufacturing Date: %04d/%02d/%02d" % (prefix, self.ManufacturingYear, self.ManufacturingMonth, self.ManufacturingDay))
            else:
                print("%sManufacturing Date: %04d/%02d" % (prefix, self.ManufacturingYear, self.ManufacturingMonth))
        if self.Primary802154MACAddress is not None:
            print("%sPrimary 802.15.4 MAC Address: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.Primary802154MACAddress)))
        if self.PrimaryWiFiMACAddress is not None:
            print("%sPrimary WiFi MAC Address: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.PrimaryWiFiMACAddress)))
        if self.RendezvousWiFiESSID is not None:
            suffixTag = " Suffix" if self.IsRendezvousWiFiESSIDSuffix else ""
            print("%sRendezvous WiFi ESSID%s: %s" % (prefix, suffixTag, self.RendezvousWiFiESSID))
        if self.PairingCode is not None:
            print("%sPairing Code: %s" % (prefix, self.PairingCode))
        if self.PairingCompatibilityVersionMajor is not None:
            print("%sPairing Compatibility Major Id: %X" % (prefix, self.PairingCompatibilityVersionMajor))
        if self.PairingCompatibilityVersionMinor is not None:
            print("%sPairing Compatibility Minor Id: %X" % (prefix, self.PairingCompatibilityVersionMinor))
        if self.DeviceFeatures is not None:
            print("%sDevice Features: %s" % (prefix, " ".join([DeviceFeatureToString(val) for val in self.DeviceFeatures])))

    @property
    def IsRendezvousWiFiESSIDSuffix(self):
        """True when the descriptor's ESSID holds only a suffix (flag bit 0x01)."""
        return (self.Flags & DeviceDescriptorFlag_IsRendezvousWiFiESSIDSuffix) != 0
class WirelessRegConfig:
    """Wireless regulatory configuration reported by / pushed to a device.

    Attributes default to None meaning "not specified".
    """

    def __init__(self, regDomain=None, opLocation=None, supportedRegDomains=None):
        self.RegDomain = regDomain
        self.OpLocation = opLocation
        self.SupportedRegDomains = supportedRegDomains

    def Print(self, prefix=""):
        """Print each populated field on its own line, prefixed by *prefix*."""
        if self.RegDomain is not None:
            # Domain code '00' is the world-wide ("any country") domain.
            worldwide = ' (world wide)' if self.RegDomain == '00' else ''
            print("%sRegulatory Domain: %s%s" % (prefix, self.RegDomain, worldwide))
        if self.OpLocation is not None:
            print("%sOperating Location: %s" % (prefix, OperatingLocationToString(self.OpLocation)))
        if self.SupportedRegDomains is not None:
            print("%sSupported Regulatory Domains: %s" % (prefix, ','.join(self.SupportedRegDomains)))
class _IdentifyDeviceCriteriaStruct(Structure):
    """ctypes mirror of the native IdentifyDeviceCriteria structure.

    Field order and types must match the C layout exactly; wildcard values
    are the module-level TargetFabricId_*/TargetDeviceMode_*/Target*_Any
    constants.
    """
    _fields_ = [
        ("TargetFabricId", c_uint64),
        ("TargetModes", c_uint32),
        ("TargetVendorId", c_uint16),
        ("TargetProductId", c_uint16),
        ("TargetDeviceId", c_uint64)
    ]
class _NetworkInfoStruct(Structure):
    """ctypes mirror of the native NetworkInfo structure.

    Field order and types must match the C layout exactly.  The to/from
    helpers translate between this wire-level form (with sentinel values for
    "not specified") and the Python-level NetworkInfo class (which uses None).
    """
    _fields_ = [
        ('NetworkType', c_int32),           # The type of network.
        ('NetworkId', c_int64),             # network id assigned to the network by the device, -1 if not specified.
        ('WiFiSSID', c_char_p),             # The WiFi SSID.
        ('WiFiMode', c_int32),              # The operating mode of the WiFi network.
        ('WiFiRole', c_int32),              # The role played by the device on the WiFi network.
        ('WiFiSecurityType', c_int32),      # The WiFi security type.
        ('WiFiKey', c_void_p),              # The WiFi key, or NULL if not specified.
        ('WiFiKeyLen', c_uint32),           # The length in bytes of the WiFi key.
        ('ThreadNetworkName', c_char_p),    # The name of the Thread network.
        ('ThreadExtendedPANId', c_void_p),  # The Thread extended PAN id (8 bytes).
        ('ThreadNetworkKey', c_void_p),     # The Thread master network key.
        ('ThreadPSKc', c_void_p),           # The Thread pre-shared key for commissioner
        ('ThreadPANId', c_uint32),          # The 16-bit Thread PAN ID, or kThreadPANId_NotSpecified
        ('ThreadChannel', c_uint8),         # The current channel on which the Thread network operates, or kThreadChannel_NotSpecified
        ('WirelessSignalStrength', c_int16),# The signal strength of the network, or INT16_MIN if not available/applicable.
        ('Hidden', c_bool)                  # Whether or not the network is hidden.
    ]

    def toNetworkInfo(self):
        """Convert this C-level struct into a Python NetworkInfo.

        Sentinels (-1, ThreadPANId_NotSpecified, ThreadChannel_NotSpecified,
        INT16_MIN) become None.  Key/PAN-id buffers are copied into Python
        bytearrays; Thread extended PAN id is 8 bytes, keys are 16 bytes.
        """
        return NetworkInfo(
            networkType = self.NetworkType if self.NetworkType != -1 else None,
            networkId = self.NetworkId if self.NetworkId != -1 else None,
            wifiSSID = WeaveUtility.CStringToString(self.WiFiSSID),
            wifiMode = self.WiFiMode if self.WiFiMode != -1 else None,
            wifiRole = self.WiFiRole if self.WiFiRole != -1 else None,
            wifiSecurityType = self.WiFiSecurityType if self.WiFiSecurityType != -1 else None,
            wifiKey = WeaveUtility.VoidPtrToByteArray(self.WiFiKey, self.WiFiKeyLen),
            threadNetworkName = WeaveUtility.CStringToString(self.ThreadNetworkName),
            threadExtendedPANId = WeaveUtility.VoidPtrToByteArray(self.ThreadExtendedPANId, 8),
            threadNetworkKey = WeaveUtility.VoidPtrToByteArray(self.ThreadNetworkKey, 16),
            threadPSKc = WeaveUtility.VoidPtrToByteArray(self.ThreadPSKc, 16),
            threadPANId = self.ThreadPANId if self.ThreadPANId != ThreadPANId_NotSpecified else None,
            threadChannel = self.ThreadChannel if self.ThreadChannel != ThreadChannel_NotSpecified else None,
            wirelessSignalStrength = self.WirelessSignalStrength if self.WirelessSignalStrength != -32768 else None
        )

    @classmethod
    def fromNetworkInfo(cls, networkInfo):
        """Build a C-level struct from a Python NetworkInfo.

        None fields are encoded as the corresponding native sentinel values.
        NOTE(review): the byte buffers created by ByteArrayToVoidPtr must stay
        alive for the lifetime of the returned struct -- confirm WeaveUtility
        keeps a reference, otherwise callers must.
        """
        networkInfoStruct = cls()
        networkInfoStruct.NetworkType = networkInfo.NetworkType if networkInfo.NetworkType != None else -1
        networkInfoStruct.NetworkId = networkInfo.NetworkId if networkInfo.NetworkId != None else -1
        networkInfoStruct.WiFiSSID = WeaveUtility.StringToCString(networkInfo.WiFiSSID)
        networkInfoStruct.WiFiMode = networkInfo.WiFiMode if networkInfo.WiFiMode != None else -1
        networkInfoStruct.WiFiRole = networkInfo.WiFiRole if networkInfo.WiFiRole != None else -1
        networkInfoStruct.WiFiSecurityType = networkInfo.WiFiSecurityType if networkInfo.WiFiSecurityType != None else -1
        networkInfoStruct.WiFiKey = WeaveUtility.ByteArrayToVoidPtr(networkInfo.WiFiKey)
        networkInfoStruct.WiFiKeyLen = len(networkInfo.WiFiKey) if (networkInfo.WiFiKey != None) else 0
        networkInfoStruct.ThreadNetworkName = WeaveUtility.StringToCString(networkInfo.ThreadNetworkName)
        networkInfoStruct.ThreadExtendedPANId = WeaveUtility.ByteArrayToVoidPtr(networkInfo.ThreadExtendedPANId)
        networkInfoStruct.ThreadNetworkKey = WeaveUtility.ByteArrayToVoidPtr(networkInfo.ThreadNetworkKey)
        networkInfoStruct.ThreadPSKc = WeaveUtility.ByteArrayToVoidPtr(networkInfo.ThreadPSKc)
        networkInfoStruct.ThreadPANId = networkInfo.ThreadPANId if networkInfo.ThreadPANId != None else ThreadPANId_NotSpecified
        networkInfoStruct.ThreadChannel = networkInfo.ThreadChannel if networkInfo.ThreadChannel != None else ThreadChannel_NotSpecified
        networkInfoStruct.WirelessSignalStrength = networkInfo.WirelessSignalStrength if networkInfo.WirelessSignalStrength != None else -32768
        return networkInfoStruct
class _DeviceDescriptorStruct(Structure):
    """ctypes mirror of the native WeaveDeviceDescriptor structure.

    Field order, types, and fixed array sizes must match the C layout
    exactly.  0 / all-zero / empty-string values mean "not present".
    """
    _fields_ = [
        ('DeviceId', c_uint64),                   # Weave device id (0 = not present)
        ('FabricId', c_uint64),                   # Id of Weave fabric to which the device belongs (0 = not present)
        ('DeviceFeatures', c_uint32),             # Bit field indicating support for specific device features.
        ('VendorId', c_uint16),                   # Device vendor id (0 = not present)
        ('ProductId', c_uint16),                  # Device product id (0 = not present)
        ('ProductRevision', c_uint16),            # Device product revision (0 = not present)
        ('ManufacturingYear', c_uint16),          # Year of device manufacture (valid range 2001 - 2099, 0 = not present)
        ('ManufacturingMonth', c_ubyte),          # Month of device manufacture (1 = January, 0 = not present)
        ('ManufacturingDay', c_ubyte),            # Day of device manufacture (0 = not present)
        ('Primary802154MACAddress', c_ubyte * 8), # MAC address for primary 802.15.4 interface (big-endian, all zeros = not present)
        ('PrimaryWiFiMACAddress', c_ubyte * 6),   # MAC address for primary WiFi interface (big-endian, all zeros = not present)
        ('SerialNumber', c_char * 33),            # Serial number of device (nul terminated, 0 length = not present)
        ('SoftwareVersion', c_char * 33),         # Version of software running on the device (nul terminated, 0 length = not present)
        ('RendezvousWiFiESSID', c_char * 33),     # ESSID for pairing WiFi network (nul terminated, 0 length = not present)
        ('PairingCode', c_char * 17),             # Device pairing code (nul terminated, 0 length = not present)
        ('PairingCompatibilityVersionMajor', c_uint16), # Pairing software compatibility major version
        ('PairingCompatibilityVersionMinor', c_uint16), # Pairing software compatibility minor version
        ('Flags', c_ubyte),                       # Flags
    ]

    def toDeviceDescriptor(self):
        """Convert this C-level struct into a Python DeviceDescriptor.

        "Not present" sentinels (zeros, all-zero MACs, empty strings) are
        mapped to None; fixed-size byte arrays are copied into bytearrays.
        """
        return DeviceDescriptor(
            deviceId = self.DeviceId if self.DeviceId != 0 else None,
            fabricId = self.FabricId if self.FabricId != 0 else None,
            vendorId = self.VendorId if self.VendorId != 0 else None,
            productId = self.ProductId if self.ProductId != 0 else None,
            productRevision = self.ProductRevision if self.ProductRevision != 0 else None,
            manufacturingYear = self.ManufacturingYear if self.ManufacturingYear != 0 else None,
            manufacturingMonth = self.ManufacturingMonth if self.ManufacturingMonth != 0 else None,
            manufacturingDay = self.ManufacturingDay if self.ManufacturingDay != 0 else None,
            primary802154MACAddress = bytearray(self.Primary802154MACAddress) if not WeaveUtility.IsByteArrayAllZeros(self.Primary802154MACAddress) else None,
            primaryWiFiMACAddress = bytearray(self.PrimaryWiFiMACAddress) if not WeaveUtility.IsByteArrayAllZeros(self.PrimaryWiFiMACAddress) else None,
            serialNumber = WeaveUtility.CStringToString(self.SerialNumber) if len(self.SerialNumber) != 0 else None,
            softwareVersion = WeaveUtility.CStringToString(self.SoftwareVersion) if len(self.SoftwareVersion) != 0 else None,
            rendezvousWiFiESSID = WeaveUtility.CStringToString(self.RendezvousWiFiESSID) if len(self.RendezvousWiFiESSID) != 0 else None,
            pairingCode = WeaveUtility.CStringToString(self.PairingCode) if len(self.PairingCode) != 0 else None,
            pairingCompatibilityVersionMajor = self.PairingCompatibilityVersionMajor,
            pairingCompatibilityVersionMinor = self.PairingCompatibilityVersionMinor,
            deviceFeatures = self.DeviceFeatures,
            flags = self.Flags)
class _WirelessRegDomain(Structure):
    """ctypes mirror of a 2-character wireless regulatory domain code."""
    _fields_ = [
        ('Code', c_char * 2), # Wireless regulatory domain code (exactly 2 characters, non-null terminated)
    ]

    def __str__(self, *args, **kwargs):
        # Decode the fixed 2-byte code to a Python string.  The ''.join is
        # redundant for a str input but kept for byte-identity here.
        return ''.join(WeaveUtility.CStringToString(self.Code))

    @classmethod
    def fromStr(cls, val):
        """Build a struct from a 2-character code string, or a zeroed struct for None.

        Raises ValueError if val is not exactly 2 characters.
        """
        regDomainStruct = cls()
        if val != None:
            if len(val) != 2:
                raise ValueError('Invalid wireless regulatory domain code: ' + val)
            regDomainStruct.Code = WeaveUtility.StringToCString(val)
        else:
            # Two NUL bytes signal "not specified" to the native side.
            regDomainStruct.Code = b'\0\0'
        return regDomainStruct
class _WirelessRegConfigStruct(Structure):
    """ctypes mirror of the native wireless regulatory configuration struct.

    Field order and types must match the C layout exactly.
    """
    _fields_ = [
        ('SupportedRegDomains', POINTER(_WirelessRegDomain)), # Array of _WirelessRegDomain structures
        ('NumSupportedRegDomains', c_uint16), # Length of SupportedRegDomains array
        ('RegDomain', _WirelessRegDomain), # Selected wireless regulatory domain
        ('OpLocation', c_ubyte), # Selected operating location
    ]

    def toWirelessRegConfig(self):
        """Convert to a Python WirelessRegConfig.

        A NUL first byte in RegDomain and an OpLocation of 0xFF both mean
        "not specified" and become None.
        """
        return WirelessRegConfig(
            regDomain = str(self.RegDomain) if self.RegDomain.Code[0] != 0 else None,
            opLocation = self.OpLocation if self.OpLocation != 0xFF else None,
            supportedRegDomains = [ str(self.SupportedRegDomains[i]) for i in range(0, self.NumSupportedRegDomains) ]
        )

    @classmethod
    def fromWirelessRegConfig(cls, regConfig):
        """Build a C-level struct from a Python WirelessRegConfig.

        The supported-domains list is always sent empty (device-to-host only).
        NOTE(review): a None OpLocation is encoded as 0 here but decoded from
        0xFF above -- asymmetry appears intentional for the set path; confirm
        against the native API.
        """
        regConfigStruct = cls()
        regConfigStruct.SupportedRegDomains = POINTER(_WirelessRegDomain)()
        regConfigStruct.NumSupportedRegDomains = 0
        regConfigStruct.RegDomain = _WirelessRegDomain.fromStr(regConfig.RegDomain)
        regConfigStruct.OpLocation = regConfig.OpLocation if regConfig.OpLocation != None else 0
        return regConfigStruct
# ctypes callback signatures used when registering Python callbacks with the
# native Weave Device Manager library.  Each must match the corresponding C
# function-pointer typedef exactly.

# Generic async-operation completion / error callbacks.
_CompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
_IdentifyDeviceCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(_DeviceDescriptorStruct))
_PairTokenCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_uint32)
_UnpairTokenCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
# Network provisioning completions.
_NetworkScanCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16, POINTER(_NetworkInfoStruct))
_AddNetworkCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint32)
_GetNetworksCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16, POINTER(_NetworkInfoStruct))
_GetCameraAuthDataCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_char_p, c_char_p)
_GetRendezvousModeCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16)
_GetFabricConfigCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_uint32)
_ErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_ulong, POINTER(DeviceStatusStruct))
# BLE transport hooks supplied by the platform BLE module.
_GetBleEventFunct = CFUNCTYPE(c_void_p)
_WriteBleCharacteristicFunct = CFUNCTYPE(c_bool, c_void_p, c_void_p, c_void_p, c_void_p, c_uint16)
_SubscribeBleCharacteristicFunct = CFUNCTYPE(c_bool, c_void_p, c_void_p, c_void_p, c_bool)
_CloseBleFunct = CFUNCTYPE(c_bool, c_void_p)
# Device enumeration result callback (descriptor + source IP address).
_DeviceEnumerationResponseFunct = CFUNCTYPE(None, c_void_p, POINTER(_DeviceDescriptorStruct), c_char_p)
_GetWirelessRegulatoryConfigCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(_WirelessRegConfigStruct))
# This is a fix for WEAV-429. Jay Logue recommends revisiting this at a later
# date to allow for truely multiple instances so this is temporary.
def _singleton(cls):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
@_singleton
class WeaveDeviceManager(object):
    def __init__(self, startNetworkThread=True):
        """Create the device manager, load the native library, allocate the
        native device-manager object, and (by default) start the background
        network I/O thread.
        """
        self.devMgr = None
        self.networkThread = None
        self.networkThreadRunable = False
        self._weaveStack = WeaveStack()
        self._dmLib = None
        # Loads the native shared library and populates self._dmLib.
        self._InitLib()
        # Allocate the native device manager object; devMgr receives the
        # opaque handle used by all subsequent native calls.
        devMgr = c_void_p(None)
        res = self._dmLib.nl_Weave_DeviceManager_NewDeviceManager(pointer(devMgr))
        if (res != 0):
            raise self._weaveStack.ErrorToException(res)
        self.devMgr = devMgr
        self._weaveStack.devMgr = devMgr
        # Default enumeration handler: print each discovered device's address
        # and decoded descriptor.
        def HandleDeviceEnumerationResponse(devMgr, deviceDescPtr, deviceAddrStr):
            print("    Enumerated device IP: %s" % (WeaveUtility.CStringToString(deviceAddrStr)))
            deviceDescPtr.contents.toDeviceDescriptor().Print("    ")
        # Keep the ctypes thunk referenced on self so it is not garbage
        # collected while the native side may still invoke it.
        self.cbHandleDeviceEnumerationResponse = _DeviceEnumerationResponseFunct(HandleDeviceEnumerationResponse)
        self.blockingCB = None # set by other modules(BLE) that require service by thread while thread blocks.
        self.cbHandleBleEvent = None # set by other modules (BLE) that provide event callback to Weave.
        self.cbHandleBleWriteChar = None
        self.cbHandleBleSubscribeChar = None
        self.cbHandleBleClose = None
        if (startNetworkThread):
            self.StartNetworkThread()
def __del__(self):
if (self.devMgr != None):
self._dmLib.nl_Weave_DeviceManager_DeleteDeviceManager(self.devMgr)
self.devMgr = None
self.StopNetworkThread()
def DriveBleIO(self):
# perform asynchronous write to pipe in IO thread's select() to wake for BLE input
res = self._dmLib.nl_Weave_DeviceManager_WakeForBleIO()
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetBleEventCB(self, bleEventCB):
if (self.devMgr != None):
self.cbHandleBleEvent = _GetBleEventFunct(bleEventCB)
self._dmLib.nl_Weave_DeviceManager_SetBleEventCB(self.cbHandleBleEvent)
def SetBleWriteCharCB(self, bleWriteCharCB):
if (self.devMgr != None):
self.cbHandleBleWriteChar = _WriteBleCharacteristicFunct(bleWriteCharCB)
self._dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic(self.cbHandleBleWriteChar)
def SetBleSubscribeCharCB(self, bleSubscribeCharCB):
if (self.devMgr != None):
self.cbHandleBleSubscribeChar = _SubscribeBleCharacteristicFunct(bleSubscribeCharCB)
self._dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic(self.cbHandleBleSubscribeChar)
def SetBleCloseCB(self, bleCloseCB):
if (self.devMgr != None):
self.cbHandleBleClose = _CloseBleFunct(bleCloseCB)
self._dmLib.nl_Weave_DeviceManager_SetBleClose(self.cbHandleBleClose)
def StartNetworkThread(self):
if (self.networkThread != None):
return
def RunNetworkThread():
while (self.networkThreadRunable):
self._weaveStack.networkLock.acquire()
self._dmLib.nl_Weave_DeviceManager_DriveIO(50)
self._weaveStack.networkLock.release()
time.sleep(0.005)
self.networkThread = Thread(target=RunNetworkThread, name="WeaveNetworkThread")
self.networkThread.daemon = True
self.networkThreadRunable = True
self.networkThread.start()
def StopNetworkThread(self):
if (self.networkThread != None):
self.networkThreadRunable = False
self.networkThread.join()
self.networkThread = None
def IsConnected(self):
return self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_IsConnected(self.devMgr)
)
def DeviceId(self):
return self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_DeviceId(self.devMgr)
)
def DeviceAddress(self):
return self._weaveStack.Call(
lambda: WeaveUtility.CStringToString(self._dmLib.nl_Weave_DeviceManager_DeviceAddress(self.devMgr))
)
def SetRendezvousAddress(self, addr, intf = None):
if addr is not None and "\x00" in addr:
raise ValueError("Unexpected NUL character in addr");
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress(self.devMgr, WeaveUtility.StringToCString(addr), WeaveUtility.StringToCString(intf))
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetConnectTimeout(self, timeoutMS):
if timeoutMS < 0 or timeoutMS > pow(2,32):
raise ValueError("timeoutMS must be an unsigned 32-bit integer")
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetConnectTimeout(self.devMgr, timeoutMS)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetAutoReconnect(self, autoReconnect):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetAutoReconnect(self.devMgr, autoReconnect)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetRendezvousLinkLocal(self, RendezvousLinkLocal):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal(self.devMgr, RendezvousLinkLocal)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def StartDeviceEnumeration(self, targetFabricId=TargetFabricId_AnyFabric,
targetModes=TargetDeviceMode_Any,
targetVendorId=TargetVendorId_Any,
targetProductId=TargetProductId_Any,
targetDeviceId=TargetDeviceId_Any):
deviceCriteria = _IdentifyDeviceCriteriaStruct()
deviceCriteria.TargetFabricId = targetFabricId
deviceCriteria.TargetModes = targetModes
deviceCriteria.TargetVendorId = targetVendorId
deviceCriteria.TargetProductId = targetProductId
deviceCriteria.TargetDeviceId = targetDeviceId
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration(self.devMgr, deviceCriteria, self.cbHandleDeviceEnumerationResponse, self._weaveStack.cbHandleError)
)
    def StopDeviceEnumeration(self):
        """Stop an in-progress device enumeration started by StartDeviceEnumeration()."""
        # NOTE(review): the return value is captured but never checked, unlike
        # most sibling wrappers -- presumably stopping cannot fail; confirm
        # against the native API before adding error handling.
        res = self._weaveStack.Call(
            lambda: self._dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration(self.devMgr)
        )
def ConnectDevice(self, deviceId, deviceAddr=None,
pairingCode=None, accessToken=None):
if deviceAddr is not None and '\x00' in deviceAddr:
raise ValueError("Unexpected NUL character in deviceAddr")
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.ConnectDevice')
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth(self.devMgr, deviceId, WeaveUtility.StringToCString(deviceAddr), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode(self.devMgr, deviceId, WeaveUtility.StringToCString(deviceAddr), WeaveUtility.StringToCString(pairingCode), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken(self.devMgr, deviceId, WeaveUtility.StringToCString(deviceAddr), WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RendezvousDevice(self, pairingCode=None, accessToken=None,
targetFabricId=TargetFabricId_AnyFabric,
targetModes=TargetDeviceMode_Any,
targetVendorId=TargetVendorId_Any,
targetProductId=TargetProductId_Any,
targetDeviceId=TargetDeviceId_Any):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.RendezvousDevice')
deviceCriteria = _IdentifyDeviceCriteriaStruct()
deviceCriteria.TargetFabricId = targetFabricId
deviceCriteria.TargetModes = targetModes
deviceCriteria.TargetVendorId = targetVendorId
deviceCriteria.TargetProductId = targetProductId
deviceCriteria.TargetDeviceId = targetDeviceId
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth(self.devMgr, deviceCriteria, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode(self.devMgr, WeaveUtility.StringToCString(pairingCode), deviceCriteria, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), deviceCriteria, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
# methods for testing BLE performance are not a part of the Weave Device Manager API, but rather are considered internal.
def TestBle(self, connObj, count, duration, delay, ack, size, rx):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TestBle(self.devMgr, connObj, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError, count, duration, delay, ack, size, rx)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def TestResultBle(self, connObj, local):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TestResultBle(self.devMgr, connObj, local)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def TestAbortBle(self, connObj):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TestAbortBle(self.devMgr, connObj)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def TxTimingBle(self, connObj, enabled, remote):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TxTimingBle(self.devMgr, connObj, enabled, remote)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
# end of BLE testing methods
def ConnectBle(self, bleConnection, pairingCode=None, accessToken=None):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.ConnectBle')
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth(self.devMgr, bleConnection, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode(self.devMgr, bleConnection, WeaveUtility.StringToCString(pairingCode), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken(self.devMgr, bleConnection, WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def PassiveRendezvousDevice(self, pairingCode=None, accessToken=None):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.PassiveRendezvousDevice')
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode(self.devMgr, WeaveUtility.StringToCString(pairingCode), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RemotePassiveRendezvous(self, rendezvousDeviceAddr=None, pairingCode=None, accessToken=None, rendezvousTimeout=None, inactivityTimeout=None):
if rendezvousDeviceAddr == None:
rendezvousDeviceAddr = "::"
if '\x00' in rendezvousDeviceAddr:
raise ValueError("Unexpected NUL character in rendezvousDeviceAddr")
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth(self.devMgr, WeaveUtility.StringToCString(rendezvousDeviceAddr), rendezvousTimeout, inactivityTimeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth(self.devMgr, WeaveUtility.StringToCString(rendezvousDeviceAddr), WeaveUtility.StringToCString(pairingCode), rendezvousTimeout, inactivityTimeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth(self.devMgr, WeaveUtility.StringToCString(rendezvousDeviceAddr), WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), rendezvousTimeout, inactivityTimeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def ReconnectDevice(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ReconnectDevice(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def Close(self):
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_Close(self.devMgr)
)
def EnableConnectionMonitor(self, interval, timeout):
if interval < 0 or interval > pow(2,16):
raise ValueError("interval must be an unsigned 16-bit unsigned value")
if timeout < 0 or timeout > pow(2,16):
raise ValueError("timeout must be an unsigned 16-bit unsigned value")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor(self.devMgr, interval, timeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def DisableConnectionMonitor(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def IdentifyDevice(self):
def HandleIdentifyDeviceComplete(devMgr, reqState, deviceDescPtr):
self._weaveStack.callbackRes = deviceDescPtr.contents.toDeviceDescriptor()
self._weaveStack.completeEvent.set()
cbHandleIdentifyDeviceComplete = _IdentifyDeviceCompleteFunct(HandleIdentifyDeviceComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_IdentifyDevice(self.devMgr, cbHandleIdentifyDeviceComplete, self._weaveStack.cbHandleError)
)
def PairToken(self, pairingToken):
def HandlePairTokenComplete(devMgr, reqState, tokenPairingBundlePtr, tokenPairingBundleLen):
self._weaveStack.callbackRes = WeaveUtility.VoidPtrToByteArray(tokenPairingBundlePtr, tokenPairingBundleLen)
self._weaveStack.completeEvent.set()
cbHandlePairTokenComplete = _PairTokenCompleteFunct(HandlePairTokenComplete)
if pairingToken is not None and isinstance(pairingToken, str):
pairingToken = WeaveUtility.StringToCString(pairingToken)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PairToken(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(pairingToken), len(pairingToken), cbHandlePairTokenComplete, self._weaveStack.cbHandleError)
)
def UnpairToken(self):
def HandleUnpairTokenComplete(devMgr, reqState):
self._weaveStack.callbackRes = True
self._weaveStack.completeEvent.set()
cbHandleUnpairTokenComplete = _UnpairTokenCompleteFunct(HandleUnpairTokenComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UnpairToken(self.devMgr, cbHandleUnpairTokenComplete, self._weaveStack.cbHandleError)
)
def ScanNetworks(self, networkType):
def HandleScanNetworksComplete(devMgr, reqState, netCount, netInfoPtr):
self._weaveStack.callbackRes = [ netInfoPtr[i].toNetworkInfo() for i in range(netCount) ]
self._weaveStack.completeEvent.set()
cbHandleScanNetworksComplete = _NetworkScanCompleteFunct(HandleScanNetworksComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ScanNetworks(self.devMgr, networkType, cbHandleScanNetworksComplete, self._weaveStack.cbHandleError)
)
def GetNetworks(self, getFlags):
def HandleGetNetworksComplete(devMgr, reqState, netCount, netInfoPtr):
self._weaveStack.callbackRes = [ netInfoPtr[i].toNetworkInfo() for i in range(netCount) ]
self._weaveStack.completeEvent.set()
cbHandleGetNetworksComplete = _GetNetworksCompleteFunct(HandleGetNetworksComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetNetworks(self.devMgr, getFlags, cbHandleGetNetworksComplete, self._weaveStack.cbHandleError)
)
def GetCameraAuthData(self, nonce):
if nonce is not None and '\x00' in nonce:
raise ValueError("Unexpected NUL character in nonce")
def HandleGetCameraAuthDataComplete(devMgr, reqState, macAddress, signedCameraPayload):
self.callbackRes = [ WeaveUtility.CStringToString(macAddress), WeaveUtility.CStringToString(signedCameraPayload) ]
self.completeEvent.set()
cbHandleGetCameraAuthDataComplete = _GetCameraAuthDataCompleteFunct(HandleGetCameraAuthDataComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetCameraAuthData(self.devMgr, WeaveUtility.StringToCString(nonce), cbHandleGetCameraAuthDataComplete, self._weaveStack.cbHandleError)
)
def AddNetwork(self, networkInfo):
def HandleAddNetworkComplete(devMgr, reqState, networkId):
self._weaveStack.callbackRes = networkId
self._weaveStack.completeEvent.set()
cbHandleAddNetworkComplete = _AddNetworkCompleteFunct(HandleAddNetworkComplete)
networkInfoStruct = _NetworkInfoStruct.fromNetworkInfo(networkInfo)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_AddNetwork(self.devMgr, networkInfoStruct, cbHandleAddNetworkComplete, self._weaveStack.cbHandleError)
)
def UpdateNetwork(self, networkInfo):
networkInfoStruct = _NetworkInfoStruct.fromNetworkInfo(networkInfo)
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UpdateNetwork(self.devMgr, networkInfoStruct, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RemoveNetwork(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemoveNetwork(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def EnableNetwork(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_EnableNetwork(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def DisableNetwork(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_DisableNetwork(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def TestNetworkConnectivity(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetRendezvousMode(self):
def HandleGetRendezvousModeComplete(devMgr, reqState, modeFlags):
self._weaveStack.callbackRes = modeFlags
self._weaveStack.completeEvent.set()
cbHandleGetRendezvousModeComplete = _GetRendezvousModeCompleteFunct(HandleGetRendezvousModeComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetRendezvousMode(self.devMgr, cbHandleGetRendezvousModeComplete, self._weaveStack.cbHandleError)
)
def SetRendezvousMode(self, modeFlags):
if modeFlags < 0 or modeFlags > pow(2,16):
raise ValueError("modeFlags must be an unsigned 16-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_SetRendezvousMode(self.devMgr, modeFlags, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetWirelessRegulatoryConfig(self):
def HandleComplete(devMgr, reqState, regConfigPtr):
self._weaveStack.callbackRes = regConfigPtr[0].toWirelessRegConfig()
self._weaveStack.completeEvent.set()
cbHandleComplete = _GetWirelessRegulatoryConfigCompleteFunct(HandleComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetWirelessRegulatoryConfig(self.devMgr, cbHandleComplete, self._weaveStack.cbHandleError)
)
def SetWirelessRegulatoryConfig(self, regConfig):
regConfigStruct = _WirelessRegConfigStruct.fromWirelessRegConfig(regConfig)
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_SetWirelessRegulatoryConfig(self.devMgr, regConfigStruct, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetLastNetworkProvisioningResult(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def CreateFabric(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_CreateFabric(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def LeaveFabric(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_LeaveFabric(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetFabricConfig(self):
def HandleGetFabricConfigComplete(devMgr, reqState, fabricConfigPtr, fabricConfigLen):
self._weaveStack.callbackRes = WeaveUtility.VoidPtrToByteArray(fabricConfigPtr, fabricConfigLen)
self._weaveStack.completeEvent.set()
cbHandleGetFabricConfigComplete = _GetFabricConfigCompleteFunct(HandleGetFabricConfigComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetFabricConfig(self.devMgr, cbHandleGetFabricConfigComplete, self._weaveStack.cbHandleError)
)
def JoinExistingFabric(self, fabricConfig):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_JoinExistingFabric(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(fabricConfig), len(fabricConfig),
self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def Ping(self):
WeaveUtility.StringToCString("test")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_Ping(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RegisterServicePairAccount(self, serviceId, accountId, serviceConfig, pairingToken, pairingInitData):
if accountId is not None and '\x00' in accountId:
raise ValueError("Unexpected NUL character in accountId")
if pairingToken is not None and isinstance(pairingToken, str):
pairingToken = WeaveUtility.StringToCString(pairingToken)
if pairingInitData is not None and isinstance(pairingInitData, str):
pairingInitData = WeaveUtility.StringToCString(pairingInitData)
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount(self.devMgr, serviceId, WeaveUtility.StringToCString(accountId),
WeaveUtility.ByteArrayToVoidPtr(serviceConfig), len(serviceConfig),
WeaveUtility.ByteArrayToVoidPtr(pairingToken), len(pairingToken),
WeaveUtility.ByteArrayToVoidPtr(pairingInitData), len(pairingInitData),
self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def UpdateService(self, serviceId, serviceConfig):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UpdateService(self.devMgr, serviceId, WeaveUtility.ByteArrayToVoidPtr(serviceConfig),
len(serviceConfig), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def UnregisterService(self, serviceId):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UnregisterService(self.devMgr, serviceId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def ArmFailSafe(self, armMode, failSafeToken):
if armMode < 0 or armMode > pow(2, 8):
raise ValueError("armMode must be an unsigned 8-bit integer")
if failSafeToken < 0 or failSafeToken > pow(2, 32):
raise ValueError("failSafeToken must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ArmFailSafe(self.devMgr, armMode, failSafeToken, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def DisarmFailSafe(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_DisarmFailSafe(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def ResetConfig(self, resetFlags):
if resetFlags < 0 or resetFlags > pow(2, 16):
raise ValueError("resetFlags must be an unsigned 16-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ResetConfig(self.devMgr, resetFlags, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def CloseEndpoints(self):
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_CloseEndpoints()
)
def SetLogFilter(self, category):
if category < 0 or category > pow(2, 8):
raise ValueError("category must be an unsigned 8-bit integer")
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetLogFilter(category)
)
def GetLogFilter(self):
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_GetLogFilter()
)
    def SetBlockingCB(self, blockingCB):
        # Install a callback on the shared WeaveStack; presumably invoked
        # around blocking Call/CallAsync operations -- confirm against the
        # WeaveStack implementation.
        self._weaveStack.blockingCB = blockingCB
def StartSystemTest(self, profileId, testId):
if profileId < 0 or profileId > pow(2, 32):
raise ValueError("profileId must be an unsigned 32-bit integer")
if testId < 0 or testId > pow(2, 32):
raise ValueError("testId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_StartSystemTest(self.devMgr, profileId, testId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def StopSystemTest(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_StopSystemTest(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
# ----- Private Members -----
def _InitLib(self):
if (self._dmLib == None):
self._dmLib = CDLL(self._weaveStack.LocateWeaveDLL())
self._dmLib.nl_Weave_DeviceManager_NewDeviceManager.argtypes = [ POINTER(c_void_p) ]
self._dmLib.nl_Weave_DeviceManager_NewDeviceManager.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DeleteDeviceManager.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_DeleteDeviceManager.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_Close.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_Close.restype = None
self._dmLib.nl_Weave_DeviceManager_DriveIO.argtypes = [ c_uint32 ]
self._dmLib.nl_Weave_DeviceManager_DriveIO.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_WakeForBleIO.argtypes = [ ]
self._dmLib.nl_Weave_DeviceManager_WakeForBleIO.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleEventCB.argtypes = [ _GetBleEventFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleEventCB.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic.argtypes = [ _WriteBleCharacteristicFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic.argtypes = [ _SubscribeBleCharacteristicFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleClose.argtypes = [ _CloseBleFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleClose.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_IsConnected.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_IsConnected.restype = c_bool
self._dmLib.nl_Weave_DeviceManager_DeviceId.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_DeviceId.restype = c_uint64
self._dmLib.nl_Weave_DeviceManager_DeviceAddress.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_DeviceAddress.restype = c_char_p
self._dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration.argtypes = [ c_void_p, POINTER(_IdentifyDeviceCriteriaStruct), _DeviceEnumerationResponseFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration.restype = None
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth.argtypes = [ c_void_p, c_uint64, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode.argtypes = [ c_void_p, c_uint64, c_char_p, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken.argtypes = [ c_void_p, c_uint64, c_char_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth.argtypes = [ c_void_p, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode.argtypes = [ c_void_p, c_char_p, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken.argtypes = [ c_void_p, c_void_p, c_uint32, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode.argtypes = [ c_void_p, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken.argtypes = [ c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestBle.argtypes = [ c_void_p, c_void_p, _CompleteFunct, _ErrorFunct, c_uint32, c_uint32, c_uint16, c_uint8, c_uint16, c_bool ]
self._dmLib.nl_Weave_DeviceManager_TestBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestResultBle.argtypes = [ c_void_p, c_void_p, c_bool ]
self._dmLib.nl_Weave_DeviceManager_TestResultBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestAbortBle.argtypes = [ c_void_p, c_void_p ]
self._dmLib.nl_Weave_DeviceManager_TestAbortBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TxTimingBle.argtypes = [ c_void_p, c_void_p, c_bool, c_bool ]
self._dmLib.nl_Weave_DeviceManager_TxTimingBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth.argtypes = [ c_void_p, c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode.argtypes = [ c_void_p, c_void_p, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken.argtypes = [ c_void_p, c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth.argtypes = [ c_void_p, c_char_p, c_char_p, c_uint32, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth.argtypes = [ c_void_p, c_char_p, c_char_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth.argtypes = [ c_void_p, c_char_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ReconnectDevice.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ReconnectDevice.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor.argtypes = [ c_void_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_IdentifyDevice.argtypes = [ c_void_p, _IdentifyDeviceCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_IdentifyDevice.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PairToken.argtypes = [ c_void_p, c_void_p, c_uint32, _PairTokenCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PairToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UnpairToken.argtypes = [ c_void_p, _UnpairTokenCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UnpairToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ScanNetworks.argtypes = [ c_void_p, c_int, _NetworkScanCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ScanNetworks.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetNetworks.argtypes = [ c_void_p, c_int, _GetNetworksCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetNetworks.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetCameraAuthData.argtypes = [ c_void_p, c_char_p, _GetCameraAuthDataCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetCameraAuthData.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_AddNetwork.argtypes = [ c_void_p, POINTER(_NetworkInfoStruct), _AddNetworkCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_AddNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UpdateNetwork.argtypes = [ c_void_p, POINTER(_NetworkInfoStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UpdateNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemoveNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemoveNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_EnableNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_EnableNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DisableNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_DisableNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetRendezvousMode.argtypes = [ c_void_p, _GetRendezvousModeCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetRendezvousMode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetRendezvousMode.argtypes = [ c_void_p, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_SetRendezvousMode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetWirelessRegulatoryConfig.argtypes = [ c_void_p, _GetWirelessRegulatoryConfigCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_AddNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetWirelessRegulatoryConfig.argtypes = [ c_void_p, POINTER(_WirelessRegConfigStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_AddNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_LeaveFabric.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_LeaveFabric.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetFabricConfig.argtypes = [ c_void_p, _GetFabricConfigCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetFabricConfig.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.argtypes = [ c_void_p, c_char_p, c_char_p ]
self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_JoinExistingFabric.argtypes = [ c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_JoinExistingFabric.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_Ping.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_Ping.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.argtypes = [ c_void_p, c_char_p ]
self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetConnectTimeout.argtypes = [ c_void_p, c_uint32 ]
self._dmLib.nl_Weave_DeviceManager_SetConnectTimeout.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetAutoReconnect.argtypes = [ c_void_p, c_bool ]
self._dmLib.nl_Weave_DeviceManager_SetAutoReconnect.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal.argtypes = [ c_void_p, c_bool ]
self._dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount.argtypes = [ c_void_p, c_uint64, c_char_p, c_void_p, c_uint32, c_void_p, c_uint32, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UpdateService.argtypes = [ c_void_p, c_uint64, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UpdateService.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UnregisterService.argtypes = [ c_void_p, c_uint64, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UnregisterService.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ArmFailSafe.argtypes = [ c_void_p, c_uint8, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ArmFailSafe.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DisarmFailSafe.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_DisarmFailSafe.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ResetConfig.argtypes = [ c_void_p, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ResetConfig.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetLogFilter.argtypes = [ ]
self._dmLib.nl_Weave_DeviceManager_GetLogFilter.restype = c_uint8
self._dmLib.nl_Weave_DeviceManager_SetLogFilter.argtypes = [ c_uint8 ]
self._dmLib.nl_Weave_DeviceManager_SetLogFilter.restype = None
self._dmLib.nl_Weave_DeviceManager_CloseEndpoints.argtypes = [ ]
self._dmLib.nl_Weave_DeviceManager_CloseEndpoints.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_StartSystemTest.argtypes = [ c_void_p, c_uint32, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_StartSystemTest.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_StopSystemTest.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_StopSystemTest.restype = c_uint32
def NetworkTypeToString(val):
    """Return a human-readable name for a NetworkType_* constant.

    Returns None when val is None, and "UNKNOWN (<val>)" for values that do
    not match a known network type.
    """
    if val == NetworkType_WiFi:
        return "WiFi"
    if val == NetworkType_Thread:
        return "Thread"
    if val is not None:
        return "UNKNOWN (" + str(val) + ")"
    return None
def ParseNetworkType(val):
    """Map a network-type name to its NetworkType_* constant (integers pass through)."""
    if isinstance(val, six.integer_types):
        return val
    lowered = val.lower()
    table = {"wifi": NetworkType_WiFi, "thread": NetworkType_Thread}
    if lowered in table:
        return table[lowered]
    raise Exception("Invalid network type: " + str(lowered))
def WiFiModeToString(val):
    """Return a human-readable name for a WiFiMode_* value.

    Returns None when val is None; any other unrecognized value is
    rendered as "Unknown (<val>)".
    """
    if (val == WiFiMode_AdHoc):
        return "AdHoc"
    if (val == WiFiMode_Managed):
        return "Managed"
    # Identity check instead of "!= None" (PEP 8): equality can be
    # overridden by __ne__, identity cannot.
    if val is not None:
        return "Unknown (" + str(val) + ")"
    return None
def ParseWiFiMode(val):
    """Convert a WiFi-mode name (or a numeric code, returned as-is)
    to the corresponding WiFiMode_* value.

    Raises Exception for an unrecognized name.
    """
    if isinstance(val, six.integer_types):
        return val
    mode = val.lower()
    if mode in ("adhoc", "ad-hoc"):
        return WiFiMode_AdHoc
    if mode == "managed":
        return WiFiMode_Managed
    raise Exception("Invalid Wifi mode: " + str(val))
def WiFiRoleToString(val):
    """Return a human-readable name for a WiFiRole_* value.

    Returns None when val is None; any other unrecognized value is
    rendered as "Unknown (<val>)".
    """
    if (val == WiFiRole_Station):
        return "Station"
    if (val == WiFiRole_AccessPoint):
        return "AccessPoint"
    # Identity check instead of "!= None" (PEP 8).
    if val is not None:
        return "Unknown (" + str(val) + ")"
    return None
def ParseWiFiRole(val):
    """Convert a WiFi-role name (or a numeric code, returned as-is)
    to the corresponding WiFiRole_* value.

    Raises Exception for an unrecognized name.
    """
    if isinstance(val, six.integer_types):
        return val
    role = val.lower()
    if role == "station":
        return WiFiRole_Station
    if role in ("accesspoint", "access-point"):
        return WiFiRole_AccessPoint
    raise Exception("Invalid Wifi role: " + str(val))
def WiFiSecurityTypeToString(val):
    """Return a human-readable name for a WiFiSecurityType_* value.

    Returns None when val is None; any other unrecognized value is
    rendered as "Unknown (<val>)".
    """
    if (val == WiFiSecurityType_None):
        return "None"
    if (val == WiFiSecurityType_WEP):
        return "WEP"
    if (val == WiFiSecurityType_WPAPersonal):
        return "WPA"
    if (val == WiFiSecurityType_WPA2Personal):
        return "WPA2"
    if (val == WiFiSecurityType_WPA2MixedPersonal):
        return "WPA2Mixed"
    if (val == WiFiSecurityType_WPAEnterprise):
        return "WPAEnterprise"
    if (val == WiFiSecurityType_WPA2Enterprise):
        return "WPA2Enterprise"
    if (val == WiFiSecurityType_WPA2MixedEnterprise):
        return "WPA2MixedEnterprise"
    if (val == WiFiSecurityType_WPA3Personal):
        return "WPA3"
    if (val == WiFiSecurityType_WPA3MixedPersonal):
        return "WPA3Mixed"
    if (val == WiFiSecurityType_WPA3Enterprise):
        return "WPA3Enterprise"
    if (val == WiFiSecurityType_WPA3MixedEnterprise):
        return "WPA3MixedEnterprise"
    # Identity check instead of "!= None" (PEP 8).
    if val is not None:
        return "Unknown (" + str(val) + ")"
    return None
def ParseSecurityType(val):
    """Convert a WiFi security-type name to the corresponding
    WiFiSecurityType_* value.

    Accepts the common aliases (e.g. "wpa2", "wpa2personal",
    "wpa2-personal"). Raises Exception for an unrecognized name.
    """
    lookup = {
        'none':                    WiFiSecurityType_None,
        'wep':                     WiFiSecurityType_WEP,
        'wpa':                     WiFiSecurityType_WPAPersonal,
        'wpapersonal':             WiFiSecurityType_WPAPersonal,
        'wpa-personal':            WiFiSecurityType_WPAPersonal,
        'wpa2':                    WiFiSecurityType_WPA2Personal,
        'wpa2personal':            WiFiSecurityType_WPA2Personal,
        'wpa2-personal':           WiFiSecurityType_WPA2Personal,
        'wpa3':                    WiFiSecurityType_WPA3Personal,
        'wpa3personal':            WiFiSecurityType_WPA3Personal,
        'wpa3-personal':           WiFiSecurityType_WPA3Personal,
        'wpa2mixed':               WiFiSecurityType_WPA2MixedPersonal,
        'wpa2-mixed':              WiFiSecurityType_WPA2MixedPersonal,
        'wpa2mixedpersonal':       WiFiSecurityType_WPA2MixedPersonal,
        'wpa2-mixed-personal':     WiFiSecurityType_WPA2MixedPersonal,
        'wpa3mixed':               WiFiSecurityType_WPA3MixedPersonal,
        'wpa3-mixed':              WiFiSecurityType_WPA3MixedPersonal,
        'wpa3mixedpersonal':       WiFiSecurityType_WPA3MixedPersonal,
        'wpa3-mixed-personal':     WiFiSecurityType_WPA3MixedPersonal,
        'wpaenterprise':           WiFiSecurityType_WPAEnterprise,
        'wpa-enterprise':          WiFiSecurityType_WPAEnterprise,
        'wpa2enterprise':          WiFiSecurityType_WPA2Enterprise,
        'wpa2-enterprise':         WiFiSecurityType_WPA2Enterprise,
        'wpa3enterprise':          WiFiSecurityType_WPA3Enterprise,
        'wpa3-enterprise':         WiFiSecurityType_WPA3Enterprise,
        'wpa2mixedenterprise':     WiFiSecurityType_WPA2MixedEnterprise,
        'wpa2-mixed-enterprise':   WiFiSecurityType_WPA2MixedEnterprise,
        'wpa3mixedenterprise':     WiFiSecurityType_WPA3MixedEnterprise,
        'wpa3-mixed-enterprise':   WiFiSecurityType_WPA3MixedEnterprise,
    }
    try:
        return lookup[val.lower()]
    except KeyError:
        raise Exception("Invalid Wifi security type: " + str(val))
def DeviceFeatureToString(val):
    """Return a human-readable name for a DeviceFeature_* value.

    Unrecognized values are rendered as a zero-padded hex string.
    """
    names = {
        DeviceFeature_HomeAlarmLinkCapable: "HomeAlarmLinkCapable",
        DeviceFeature_LinePowered: "LinePowered",
    }
    if val in names:
        return names[val]
    return "0x%08X" % (val)
def OperatingLocationToString(val):
    """Map an operating-location code (1-3) to its lowercase name.

    Raises Exception for any other value.
    """
    names = {1: 'unknown', 2: 'indoors', 3: 'outdoors'}
    if val in names:
        return names[val]
    raise Exception("Invalid operating location: " + str(val))
def ParseOperatingLocation(val):
    """Map an operating-location name (case-insensitive) to its
    numeric code (1-3).

    Raises Exception for an unrecognized name.
    """
    codes = {'unknown': 1, 'indoors': 2, 'outdoors': 3}
    key = val.lower()
    if key in codes:
        return codes[key]
    raise Exception("Invalid operating location: " + str(val))
| 53.922863 | 325 | 0.699156 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.