Schema of the rows in this dump (one row per source file):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

Each row's metadata is listed below, followed by its content column (the source file) and its length statistics.
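As a quick orientation, the sketch below shows one way rows with this schema could be consumed. It assumes the rows have been exported to a JSON Lines file named rows.jsonl (both the export and the file name are assumptions, not part of this dump); only the column names are taken from the schema above.

import json

# Hypothetical JSON Lines export: one JSON object per line, keyed by the column
# names listed in the schema (the file name is an assumption).
ROWS_PATH = "rows.jsonl"

with open(ROWS_PATH, encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Columns marked as nullable in the schema come back as None.
        stars = row["max_stars_count"] or 0
        print(f'{row["hexsha"][:12]}  {row["max_stars_repo_name"]}/{row["max_stars_repo_path"]}'
              f'  ({row["size"]} bytes, {stars} stars, licenses {row["max_stars_repo_licenses"]})')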

hexsha: 7952fdfe210cf955b6018a8b4f1c8dcd742e577b | size: 5,102 | ext: py | lang: Python
max_stars_repo: HanseMerkur/nitro-python @ d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | path: nitro/resource/config/ns/nstcpbufparam.py | licenses: ["Apache-2.0"] | stars: 2 (2020-08-24T18:04:22.000Z to 2020-08-24T18:04:47.000Z)
max_issues_repo: same repo, head, path and licenses | issues: null
max_forks_repo: same repo, head, path and licenses | forks: null
content:
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class nstcpbufparam(base_resource) :
"""Configuration for tcp buffer parameter resource."""
def __init__(self) :
self._size = 0
self._memlimit = 0
@property
def size(self) :
"""TCP buffering size per connection, in kilobytes.<br/>Default value: 64<br/>Minimum length = 4<br/>Maximum length = 20480."""
try :
return self._size
except Exception as e:
raise e
@size.setter
def size(self, size) :
"""TCP buffering size per connection, in kilobytes.<br/>Default value: 64<br/>Minimum length = 4<br/>Maximum length = 20480
:param size:
"""
try :
self._size = size
except Exception as e:
raise e
@property
def memlimit(self) :
"""Maximum memory, in megabytes, that can be used for buffering.<br/>Default value: 64."""
try :
return self._memlimit
except Exception as e:
raise e
@memlimit.setter
def memlimit(self, memlimit) :
"""Maximum memory, in megabytes, that can be used for buffering.<br/>Default value: 64
:param memlimit:
"""
try :
self._memlimit = memlimit
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(nstcpbufparam_response, response, self.__class__.__name__)
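            # A non-zero errorcode signals a NITRO failure; 444 is treated as an
            # invalid session, so the cached session is cleared first.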
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nstcpbufparam
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
"""Use this API to update nstcpbufparam.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = nstcpbufparam()
updateresource.size = resource.size
updateresource.memlimit = resource.memlimit
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of nstcpbufparam resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = nstcpbufparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the nstcpbufparam resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = nstcpbufparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class nstcpbufparam_response(base_response) :
""" """
def __init__(self, length=1) :
self.nstcpbufparam = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nstcpbufparam = [nstcpbufparam() for _ in range(length)]
avg_line_length: 31.493827 | max_line_length: 137 | alphanum_fraction: 0.599373

hexsha: 7952fe3bf97975ce835c0142e84dd524de5fc242 | size: 2,077 | ext: py | lang: Python
max_stars_repo: kkszysiu/ultrafeedparser @ f3f9a53013049a29771743b5e4ec97fb7c39080e | path: bench/atom10_test.py | licenses: ["MIT"] | stars: null
max_issues_repo: same repo, head, path and licenses | issues: null
max_forks_repo: same repo, head, path and licenses | forks: null
content:
# -*- coding: utf-8 -*-
import sys
import time
import pytest
import feedparser
import ultrafeedparser
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
ATOM10_DATA = """
<feed xmlns="http://www.w3.org/2005/Atom">
<title>Example Feed</title>
<link href="http://example.org/"/>
<updated>2003-12-13T18:30:02Z</updated>
<author>
<name>John Doe</name>
</author>
<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
<entry>
<title>Atom-Powered Robots Run Amok</title>
<link href="http://example.org/2003/12/13/atom03"/>
<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
<updated>2003-12-13T18:30:02Z</updated>
<summary>Some text.</summary>
</entry>
</feed>
"""
@pytest.mark.benchmark(
group="atom10-parse",
min_time=0.1,
max_time=0.5,
min_rounds=10,
timer=time.time,
disable_gc=True,
warmup=False
)
def test_ultrafeedparser_parse(benchmark):
@benchmark
def parse():
print(ultrafeedparser.parse(ATOM10_DATA))
@pytest.mark.benchmark(
group="atom10-parse",
min_time=0.1,
max_time=0.5,
min_rounds=10,
timer=time.time,
disable_gc=True,
warmup=False
)
def test_feedparser_parse(benchmark):
@benchmark
def parse():
print(feedparser.parse(ATOM10_DATA))
if PY2:
import speedparser
@pytest.mark.benchmark(
group="atom10-parse",
min_time=0.1,
max_time=0.5,
min_rounds=10,
timer=time.time,
disable_gc=True,
warmup=False
)
def test_speedparser_parse(benchmark):
@benchmark
def parse():
print(speedparser.parse(ATOM10_DATA))
if PY3:
import atoma
ATOM10_DATA_BYTES = str.encode(ATOM10_DATA)
@pytest.mark.benchmark(
group="atom10-parse",
min_time=0.1,
max_time=0.5,
min_rounds=10,
timer=time.time,
disable_gc=True,
warmup=False
)
def test_atoma_parse(benchmark):
@benchmark
def parse():
print(atoma.parse_atom_bytes(ATOM10_DATA_BYTES))
avg_line_length: 21.635417 | max_line_length: 60 | alphanum_fraction: 0.634569

hexsha: 7952fe4dbcb544af681681a908ce28d6671bdb8b | size: 1,101 | ext: py | lang: Python
max_stars_repo: elfgzp/leetCode @ 964c6574d310a9a6c486bf638487fd2f72b83b3f | path: _95.py | licenses: ["MIT"] | stars: 3 (2019-04-12T06:22:56.000Z to 2019-05-04T04:25:01.000Z)
max_issues_repo: elfgzp/Leetcode @ 964c6574d310a9a6c486bf638487fd2f72b83b3f | path: _95.py | licenses: ["MIT"] | issues: null
max_forks_repo: elfgzp/Leetcode @ 964c6574d310a9a6c486bf638487fd2f72b83b3f | path: _95.py | licenses: ["MIT"] | forks: null
content:
# -*- coding: utf-8 -*-
__author__ = 'gzp'
from utils import TreeNode
class Solution:
cache = {}
def generateTrees(self, n):
"""
:type n: int
:rtype: List[TreeNode]
"""
if n == 0:
return []
return self._generateTrees(1, n)
def _generateTrees(self, left, right):
if left > right:
return [None]
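        # Memoize by (left, right) interval; the cache is a class attribute, so results
        # persist across Solution instances and repeated calls.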
if (left, right) in self.cache.keys():
return self.cache[(left, right)]
res = []
for i in range(left, right + 1):
left_nodes = self._generateTrees(left, i - 1)
right_nodes = self._generateTrees(i + 1, right)
for left_node in left_nodes:
for right_node in right_nodes:
root = TreeNode(i)
root.left = left_node
root.right = right_node
res.append(root)
self.cache[(left, right)] = res
return res
if __name__ == '__main__':
s = Solution()
res = s.generateTrees(3)
for each in res:
print(each.get_nodes())
avg_line_length: 23.425532 | max_line_length: 59 | alphanum_fraction: 0.506812

hexsha: 7952fee4951eb2e973f97efaaebb4da80d34110c | size: 653 | ext: py | lang: Python
max_stars_repo: salah608/OPENPILOT @ be214b44947d2a52571b1031c25dde5d54a5fe10 | path: tools/lib/auth_config.py | licenses: ["MIT"] | stars: 3 (2019-06-24T07:46:43.000Z to 2019-06-25T07:18:02.000Z)
max_issues_repo: same repo, head, path and licenses | issues: 1 (2019-06-24T07:55:47.000Z to 2019-06-25T03:33:11.000Z)
max_forks_repo: same repo, head, path and licenses | forks: 2 (2019-06-25T06:40:15.000Z to 2019-06-26T10:11:22.000Z)
content:
import json
import os
from common.file_helpers import mkdirs_exists_ok
from system.hardware import PC
class MissingAuthConfigError(Exception):
pass
if PC:
CONFIG_DIR = os.path.expanduser('~/.comma')
else:
CONFIG_DIR = "/tmp/.comma"
mkdirs_exists_ok(CONFIG_DIR)
def get_token():
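  # A missing or unreadable auth.json simply means there is no token.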
try:
with open(os.path.join(CONFIG_DIR, 'auth.json')) as f:
auth = json.load(f)
return auth['access_token']
except Exception:
return None
def set_token(token):
with open(os.path.join(CONFIG_DIR, 'auth.json'), 'w') as f:
json.dump({'access_token': token}, f)
def clear_token():
os.unlink(os.path.join(CONFIG_DIR, 'auth.json'))
avg_line_length: 18.657143 | max_line_length: 61 | alphanum_fraction: 0.701378

hexsha: 7952ffc65d6f5a2c929666249affd8ba4b03c5fd | size: 343 | ext: py | lang: Python
max_stars_repo: Polar1ty/euler_problems @ bc1cd917d95d1b63b80a0b182dbd5e9f90a95d90 | path: 5.py | licenses: ["MIT"] | stars: 2 (2020-06-09T10:35:12.000Z to 2020-06-09T11:32:16.000Z)
max_issues_repo: same repo, head, path and licenses | issues: null
max_forks_repo: same repo, head, path and licenses | forks: null
content:
# Smallest multiple
import time
start = time.time()
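# Brute force: it is enough to test divisibility by 11..20, since every number
# in 1..10 divides at least one of 11..20.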
for i in range(1, 1000000000):
if i % 11 == 0 and i % 12 == 0 and i % 13 == 0 and i % 14 == 0 and i % 15 == 0 and i % 16 == 0 and i % 17 == 0 and i % 18 == 0 and i % 19 == 0 and i % 20 == 0:
print(i)
end = time.time()
print('Time of execution = ' + str(end - start))
avg_line_length: 31.181818 | max_line_length: 164 | alphanum_fraction: 0.51895

hexsha: 795302b0cb2e341c667b782d79d15466edb70e60 | size: 21,120 | ext: py | lang: Python
max_stars_repo: gvashchenkolineate/gvashchenkolineate_infra_trytravis @ 0fb18850afe0d8609693ba4b23f29c7cda17d97f | path: ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/google/gcp_dns_managed_zone.py | licenses: ["MIT"] | stars: 17 (2017-06-07T23:15:01.000Z to 2021-08-30T14:32:36.000Z)
max_issues_repo: same repo, head, path and licenses | issues: 9 (2017-06-25T03:31:52.000Z to 2021-05-17T23:43:12.000Z)
max_forks_repo: same repo, head, path and licenses | forks: 3 (2018-05-26T21:31:22.000Z to 2019-09-28T17:00:45.000Z)
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_dns_managed_zone
description:
- A zone is a subtree of the DNS namespace under one administrative responsibility.
A ManagedZone is a resource that represents a DNS zone hosted by the Cloud DNS service.
short_description: Creates a GCP ManagedZone
version_added: 2.5
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- A mutable string of at most 1024 characters associated with this resource for
the user's convenience. Has no effect on the managed zone's function.
required: true
type: str
dns_name:
description:
- The DNS name of this managed zone, for instance "example.com.".
required: true
type: str
dnssec_config:
description:
- DNSSEC configuration.
required: false
type: dict
version_added: 2.9
suboptions:
kind:
description:
- Identifies what kind of resource this is.
required: false
default: dns#managedZoneDnsSecConfig
type: str
non_existence:
description:
- Specifies the mechanism used to provide authenticated denial-of-existence
responses.
- 'Some valid choices include: "nsec", "nsec3"'
required: false
type: str
state:
description:
- Specifies whether DNSSEC is enabled, and what mode it is in.
- 'Some valid choices include: "off", "on", "transfer"'
required: false
type: str
default_key_specs:
description:
- Specifies parameters that will be used for generating initial DnsKeys for
this ManagedZone. If you provide a spec for keySigning or zoneSigning, you
must also provide one for the other.
required: false
type: list
suboptions:
algorithm:
description:
- String mnemonic specifying the DNSSEC algorithm of this key.
- 'Some valid choices include: "ecdsap256sha256", "ecdsap384sha384", "rsasha1",
"rsasha256", "rsasha512"'
required: false
type: str
key_length:
description:
- Length of the keys in bits.
required: false
type: int
key_type:
description:
- Specifies whether this is a key signing key (KSK) or a zone signing
key (ZSK). Key signing keys have the Secure Entry Point flag set and,
when active, will only be used to sign resource record sets of type
DNSKEY. Zone signing keys do not have the Secure Entry Point flag set
and will be used to sign all other types of resource record sets. .
- 'Some valid choices include: "keySigning", "zoneSigning"'
required: false
type: str
kind:
description:
- Identifies what kind of resource this is.
required: false
default: dns#dnsKeySpec
type: str
name:
description:
- User assigned name for this resource.
- Must be unique within the project.
required: true
type: str
name_server_set:
description:
- Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet
is a set of DNS name servers that all host the same ManagedZones. Most users
will leave this field unset.
required: false
type: str
labels:
description:
- A set of key/value label pairs to assign to this ManagedZone.
required: false
type: dict
version_added: 2.8
visibility:
description:
- 'The zone''s visibility: public zones are exposed to the Internet, while private
zones are visible only to Virtual Private Cloud resources.'
- 'Must be one of: `public`, `private`.'
- 'Some valid choices include: "private", "public"'
required: false
default: public
type: str
version_added: 2.8
private_visibility_config:
description:
- For privately visible zones, the set of Virtual Private Cloud resources that
the zone is visible from.
required: false
type: dict
version_added: 2.8
suboptions:
networks:
description:
- The list of VPC networks that can see this zone.
required: false
type: list
suboptions:
network_url:
description:
- The fully qualified URL of the VPC network to bind to.
- This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`)
.
required: false
type: str
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/dns/api/v1/managedZones)'
- 'Managing Zones: U(https://cloud.google.com/dns/zones/)'
'''
EXAMPLES = '''
- name: create a managed zone
gcp_dns_managed_zone:
name: test_object
dns_name: test.somewild2.example.com.
description: test zone
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
description:
description:
- A mutable string of at most 1024 characters associated with this resource for
the user's convenience. Has no effect on the managed zone's function.
returned: success
type: str
dnsName:
description:
- The DNS name of this managed zone, for instance "example.com.".
returned: success
type: str
dnssecConfig:
description:
- DNSSEC configuration.
returned: success
type: complex
contains:
kind:
description:
- Identifies what kind of resource this is.
returned: success
type: str
nonExistence:
description:
- Specifies the mechanism used to provide authenticated denial-of-existence
responses.
returned: success
type: str
state:
description:
- Specifies whether DNSSEC is enabled, and what mode it is in.
returned: success
type: str
defaultKeySpecs:
description:
- Specifies parameters that will be used for generating initial DnsKeys for
this ManagedZone. If you provide a spec for keySigning or zoneSigning, you
must also provide one for the other.
returned: success
type: complex
contains:
algorithm:
description:
- String mnemonic specifying the DNSSEC algorithm of this key.
returned: success
type: str
keyLength:
description:
- Length of the keys in bits.
returned: success
type: int
keyType:
description:
- Specifies whether this is a key signing key (KSK) or a zone signing key
(ZSK). Key signing keys have the Secure Entry Point flag set and, when
active, will only be used to sign resource record sets of type DNSKEY.
Zone signing keys do not have the Secure Entry Point flag set and will
be used to sign all other types of resource record sets. .
returned: success
type: str
kind:
description:
- Identifies what kind of resource this is.
returned: success
type: str
id:
description:
- Unique identifier for the resource; defined by the server.
returned: success
type: int
name:
description:
- User assigned name for this resource.
- Must be unique within the project.
returned: success
type: str
nameServers:
description:
- Delegate your managed_zone to these virtual name servers; defined by the server
.
returned: success
type: list
nameServerSet:
description:
- Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is
a set of DNS name servers that all host the same ManagedZones. Most users will
leave this field unset.
returned: success
type: str
creationTime:
description:
- The time that this resource was created on the server.
- This is in RFC3339 text format.
returned: success
type: str
labels:
description:
- A set of key/value label pairs to assign to this ManagedZone.
returned: success
type: dict
visibility:
description:
- 'The zone''s visibility: public zones are exposed to the Internet, while private
zones are visible only to Virtual Private Cloud resources.'
- 'Must be one of: `public`, `private`.'
returned: success
type: str
privateVisibilityConfig:
description:
- For privately visible zones, the set of Virtual Private Cloud resources that the
zone is visible from.
returned: success
type: complex
contains:
networks:
description:
- The list of VPC networks that can see this zone.
returned: success
type: complex
contains:
networkUrl:
description:
- The fully qualified URL of the VPC network to bind to.
- This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`)
.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(required=True, type='str'),
dns_name=dict(required=True, type='str'),
dnssec_config=dict(
type='dict',
options=dict(
kind=dict(default='dns#managedZoneDnsSecConfig', type='str'),
non_existence=dict(type='str'),
state=dict(type='str'),
default_key_specs=dict(
type='list',
elements='dict',
options=dict(
algorithm=dict(type='str'), key_length=dict(type='int'), key_type=dict(type='str'), kind=dict(default='dns#dnsKeySpec', type='str')
),
),
),
),
name=dict(required=True, type='str'),
name_server_set=dict(type='str'),
labels=dict(type='dict'),
visibility=dict(default='public', type='str'),
private_visibility_config=dict(type='dict', options=dict(networks=dict(type='list', elements='dict', options=dict(network_url=dict(type='str'))))),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
state = module.params['state']
kind = 'dns#managedZone'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
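    # Reconcile the requested state against what exists: update or delete an
    # existing zone, create a missing one when state is 'present'.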
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'dns')
return return_if_object(module, auth.post(link, resource_to_request(module)), kind)
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
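    # Only description, labels and privateVisibilityConfig are patched in place here.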
if (
response.get('description') != request.get('description')
or response.get('labels') != request.get('labels')
or response.get('privateVisibilityConfig') != request.get('privateVisibilityConfig')
):
description_update(module, request, response)
def description_update(module, request, response):
auth = GcpSession(module, 'dns')
auth.patch(
''.join(["https://www.googleapis.com/dns/v1/", "projects/{project}/managedZones/{name}"]).format(**module.params),
{
u'description': module.params.get('description'),
u'labels': module.params.get('labels'),
u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(module.params.get('private_visibility_config', {}), module).to_request(),
},
)
def delete(module, link, kind):
auth = GcpSession(module, 'dns')
return return_if_object(module, auth.delete(link), kind)
def resource_to_request(module):
request = {
u'kind': 'dns#managedZone',
u'description': module.params.get('description'),
u'dnsName': module.params.get('dns_name'),
u'dnssecConfig': ManagedZoneDnssecconfig(module.params.get('dnssec_config', {}), module).to_request(),
u'name': module.params.get('name'),
u'nameServerSet': module.params.get('name_server_set'),
u'labels': module.params.get('labels'),
u'visibility': module.params.get('visibility'),
u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(module.params.get('private_visibility_config', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'dns')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/dns/v1/projects/{project}/managedZones/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/dns/v1/projects/{project}/managedZones".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'description': response.get(u'description'),
u'dnsName': response.get(u'dnsName'),
u'dnssecConfig': ManagedZoneDnssecconfig(response.get(u'dnssecConfig', {}), module).from_response(),
u'id': response.get(u'id'),
u'name': response.get(u'name'),
u'nameServers': response.get(u'nameServers'),
u'nameServerSet': response.get(u'nameServerSet'),
u'creationTime': response.get(u'creationTime'),
u'labels': response.get(u'labels'),
u'visibility': response.get(u'visibility'),
u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(response.get(u'privateVisibilityConfig', {}), module).from_response(),
}
class ManagedZoneDnssecconfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'kind': self.request.get('kind'),
u'nonExistence': self.request.get('non_existence'),
u'state': self.request.get('state'),
u'defaultKeySpecs': ManagedZoneDefaultkeyspecsArray(self.request.get('default_key_specs', []), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'kind': self.request.get(u'kind'),
u'nonExistence': self.request.get(u'nonExistence'),
u'state': self.request.get(u'state'),
u'defaultKeySpecs': ManagedZoneDefaultkeyspecsArray(self.request.get(u'defaultKeySpecs', []), self.module).from_response(),
}
)
class ManagedZoneDefaultkeyspecsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{u'algorithm': item.get('algorithm'), u'keyLength': item.get('key_length'), u'keyType': item.get('key_type'), u'kind': item.get('kind')}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{u'algorithm': item.get(u'algorithm'), u'keyLength': item.get(u'keyLength'), u'keyType': item.get(u'keyType'), u'kind': item.get(u'kind')}
)
class ManagedZonePrivatevisibilityconfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'networks': ManagedZoneNetworksArray(self.request.get('networks', []), self.module).to_request()})
def from_response(self):
return remove_nones_from_dict({u'networks': ManagedZoneNetworksArray(self.request.get(u'networks', []), self.module).from_response()})
class ManagedZoneNetworksArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'networkUrl': item.get('network_url')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'networkUrl': item.get(u'networkUrl')})
if __name__ == '__main__':
main()
avg_line_length: 33.684211 | max_line_length: 159 | alphanum_fraction: 0.615672

hexsha: 795302b4e0a8f72d38fa24d4cb82755338744c19 | size: 2,441 | ext: py | lang: Python
max_stars_repo: EmmanuelMuchiri/Neighbourhood @ 2412eab3f557db655329734254fb460009d81cc8 | path: neighbourhood/migrations/0001_initial.py | licenses: ["MIT"] | stars: null
max_issues_repo: same repo, head, path and licenses | issues: 4 (2020-06-05T23:34:55.000Z to 2021-06-10T21:57:02.000Z)
max_forks_repo: markmumba/defaulters @ 8f78863fb04932c0af0186442aa0873f861da5cb | path: neighbourhood/migrations/0001_initial.py | licenses: ["MIT"] | forks: null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-09-14 12:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
('image', models.ImageField(upload_to='post/')),
('post', tinymce.models.HTMLField()),
('post_date', models.DateTimeField(auto_now_add=True)),
('profpic', models.ImageField(upload_to='profpics/')),
],
),
migrations.CreateModel(
name='neighbourhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('neighbourhood', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profpic', models.ImageField(upload_to='profpics/')),
('description', tinymce.models.HTMLField()),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('neighbourhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.neighbourhood')),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='blogpost',
name='neighbourhood',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.neighbourhood'),
),
migrations.AddField(
model_name='blogpost',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
avg_line_length: 40.016393 | max_line_length: 132 | alphanum_fraction: 0.602212

hexsha: 795302c3364c83eceba63908704b80f0160b5721 | size: 1,013 | ext: py | lang: Python
max_stars_repo: WhenTheyCry96/chainerrl @ 0f32aae2855dbb6288ae628be6271739ced6c42c | path: chainerrl/agents/double_dqn.py | licenses: ["MIT"] | stars: 2 (2020-05-20T06:15:20.000Z to 2020-05-20T06:15:27.000Z)
max_issues_repo: same repo, head, path and licenses | issues: null
max_forks_repo: same repo, head, path and licenses | forks: null
content:
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases() # NOQA
import chainer
from chainerrl.agents import dqn
from chainerrl.recurrent import state_kept
class DoubleDQN(dqn.DQN):
"""Double DQN.
See: http://arxiv.org/abs/1509.06461.
"""
def _compute_target_values(self, exp_batch, gamma):
batch_next_state = exp_batch['next_state']
with chainer.using_config('train', False), state_kept(self.q_function):
next_qout = self.q_function(batch_next_state)
target_next_qout = self.target_q_function(batch_next_state)
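        # Double DQN: the online network picks the greedy action and the target
        # network evaluates it, which reduces the Q-value overestimation of
        # vanilla DQN (van Hasselt et al., 2015).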
next_q_max = target_next_qout.evaluate_actions(
next_qout.greedy_actions)
batch_rewards = exp_batch['reward']
batch_terminal = exp_batch['is_state_terminal']
return batch_rewards + self.gamma * (1.0 - batch_terminal) * next_q_max
avg_line_length: 28.138889 | max_line_length: 79 | alphanum_fraction: 0.739388

hexsha: 7953033cbf0cbd23a941af3ee95e30e52edcc434 | size: 13,629 | ext: py | lang: Python
max_stars_repo: frennkie/oscrypto @ 24aff3148379b931d9c72ab3b069e537dc2195f8 | path: oscrypto/_mac/_core_foundation_ctypes.py | licenses: ["MIT"] | stars: 1 (2020-05-17T06:44:51.000Z to 2020-05-17T06:44:51.000Z)
max_issues_repo: same repo, head, path and licenses | issues: null
max_forks_repo: same repo, head, path and licenses | forks: null
content:
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
from ctypes.util import find_library
from ctypes import c_void_p, c_long, c_uint32, c_char_p, c_byte, c_ulong, c_bool
from ctypes import CDLL, string_at, cast, POINTER, byref
import ctypes
from .._ffi import FFIEngineError, buffer_from_bytes, byte_string_from_buffer
from ..errors import LibraryNotFoundError
__all__ = [
'CFHelpers',
'CoreFoundation',
]
core_foundation_path = find_library('CoreFoundation')
if not core_foundation_path:
raise LibraryNotFoundError('The library CoreFoundation could not be found')
CoreFoundation = CDLL(core_foundation_path, use_errno=True)
CFIndex = c_long
CFStringEncoding = c_uint32
CFArray = c_void_p
CFData = c_void_p
CFString = c_void_p
CFNumber = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFTypeID = c_ulong
CFBoolean = c_void_p
CFNumberType = c_uint32
CFTypeRef = POINTER(CFType)
CFArrayRef = POINTER(CFArray)
CFDataRef = POINTER(CFData)
CFStringRef = POINTER(CFString)
CFNumberRef = POINTER(CFNumber)
CFBooleanRef = POINTER(CFBoolean)
CFDictionaryRef = POINTER(CFDictionary)
CFErrorRef = POINTER(CFError)
CFAllocatorRef = c_void_p
CFDictionaryKeyCallBacks = c_void_p
CFDictionaryValueCallBacks = c_void_p
CFArrayCallBacks = c_void_p
pointer_p = POINTER(c_void_p)
try:
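    # Declare argtypes/restype for each CoreFoundation symbol used below; a missing
    # symbol raises AttributeError, which is reported as an FFIEngineError.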
CoreFoundation.CFDataGetLength.argtypes = [
CFDataRef
]
CoreFoundation.CFDataGetLength.restype = CFIndex
CoreFoundation.CFDataGetBytePtr.argtypes = [
CFDataRef
]
CoreFoundation.CFDataGetBytePtr.restype = c_void_p
CoreFoundation.CFDataCreate.argtypes = [
CFAllocatorRef,
c_char_p,
CFIndex
]
CoreFoundation.CFDataCreate.restype = CFDataRef
CoreFoundation.CFDictionaryCreate.argtypes = [
CFAllocatorRef,
CFStringRef,
CFTypeRef,
CFIndex,
CFDictionaryKeyCallBacks,
CFDictionaryValueCallBacks
]
CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
CoreFoundation.CFDictionaryGetCount.argtypes = [
CFDictionaryRef
]
CoreFoundation.CFDictionaryGetCount.restype = CFIndex
CoreFoundation.CFStringGetCStringPtr.argtypes = [
CFStringRef,
CFStringEncoding
]
CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
CoreFoundation.CFStringGetCString.argtypes = [
CFStringRef,
c_char_p,
CFIndex,
CFStringEncoding
]
CoreFoundation.CFStringGetCString.restype = c_bool
CoreFoundation.CFStringCreateWithCString.argtypes = [
CFAllocatorRef,
c_char_p,
CFStringEncoding
]
CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
CoreFoundation.CFNumberCreate.argtypes = [
CFAllocatorRef,
CFNumberType,
c_void_p
]
CoreFoundation.CFNumberCreate.restype = CFNumberRef
CoreFoundation.CFCopyTypeIDDescription.argtypes = [
CFTypeID
]
CoreFoundation.CFCopyTypeIDDescription.restype = CFStringRef
CoreFoundation.CFRelease.argtypes = [
CFTypeRef
]
CoreFoundation.CFRelease.restype = None
CoreFoundation.CFRetain.argtypes = [
CFTypeRef
]
CoreFoundation.CFRetain.restype = None
CoreFoundation.CFErrorCopyDescription.argtypes = [
CFErrorRef
]
CoreFoundation.CFErrorCopyDescription.restype = CFStringRef
CoreFoundation.CFErrorGetDomain.argtypes = [
CFErrorRef
]
CoreFoundation.CFErrorGetDomain.restype = CFStringRef
CoreFoundation.CFErrorGetCode.argtypes = [
CFErrorRef
]
CoreFoundation.CFErrorGetCode.restype = CFIndex
CoreFoundation.CFBooleanGetValue.argtypes = [
CFBooleanRef
]
CoreFoundation.CFBooleanGetValue.restype = c_byte
CoreFoundation.CFDictionaryGetTypeID.argtypes = []
CoreFoundation.CFDictionaryGetTypeID.restype = CFTypeID
CoreFoundation.CFNumberGetTypeID.argtypes = []
CoreFoundation.CFNumberGetTypeID.restype = CFTypeID
CoreFoundation.CFStringGetTypeID.argtypes = []
CoreFoundation.CFStringGetTypeID.restype = CFTypeID
CoreFoundation.CFDataGetTypeID.argtypes = []
CoreFoundation.CFDataGetTypeID.restype = CFTypeID
CoreFoundation.CFArrayCreate.argtypes = [
CFAllocatorRef,
POINTER(c_void_p),
CFIndex,
CFArrayCallBacks
]
CoreFoundation.CFArrayCreate.restype = CFArrayRef
CoreFoundation.CFArrayGetCount.argtypes = [
CFArrayRef
]
CoreFoundation.CFArrayGetCount.restype = CFIndex
CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
CFArrayRef,
CFIndex
]
CoreFoundation.CFArrayGetValueAtIndex.restype = CFTypeRef
CoreFoundation.CFNumberGetType.argtypes = [
CFNumberRef
]
CoreFoundation.CFNumberGetType.restype = CFNumberType
CoreFoundation.CFNumberGetValue.argtypes = [
CFNumberRef,
CFNumberType,
c_void_p
]
CoreFoundation.CFNumberGetValue.restype = c_bool
CoreFoundation.CFDictionaryGetKeysAndValues.argtypes = [
CFDictionaryRef,
pointer_p,
pointer_p
]
CoreFoundation.CFDictionaryGetKeysAndValues.restype = CFIndex
CoreFoundation.CFGetTypeID.argtypes = [
CFTypeRef
]
CoreFoundation.CFGetTypeID.restype = CFTypeID
setattr(CoreFoundation, 'kCFAllocatorDefault', CFAllocatorRef.in_dll(CoreFoundation, 'kCFAllocatorDefault'))
setattr(CoreFoundation, 'kCFBooleanTrue', CFTypeRef.in_dll(CoreFoundation, 'kCFBooleanTrue'))
kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeDictionaryKeyCallBacks')
kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeDictionaryValueCallBacks')
kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
except (AttributeError):
raise FFIEngineError('Error initializing ctypes')
setattr(CoreFoundation, 'CFDataRef', CFDataRef)
setattr(CoreFoundation, 'CFErrorRef', CFErrorRef)
setattr(CoreFoundation, 'CFArrayRef', CFArrayRef)
kCFNumberCFIndexType = CFNumberType(14)
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
def _cast_pointer_p(value):
"""
Casts a value to a pointer of a pointer
:param value:
A ctypes object
:return:
A POINTER(c_void_p) object
"""
return cast(value, pointer_p)
class CFHelpers():
"""
Namespace for core foundation helpers
"""
_native_map = {}
@classmethod
def register_native_mapping(cls, type_id, callback):
"""
Register a function to convert a core foundation data type into its
equivalent in python
:param type_id:
The CFTypeId for the type
:param callback:
A callback to pass the CFType object to
"""
cls._native_map[int(type_id)] = callback
@staticmethod
def cf_number_to_number(value):
"""
Converts a CFNumber object to a python float or integer
:param value:
The CFNumber object
:return:
A python number (float or integer)
"""
type_ = CoreFoundation.CFNumberGetType(_cast_pointer_p(value))
c_type = {
1: c_byte, # kCFNumberSInt8Type
2: ctypes.c_short, # kCFNumberSInt16Type
3: ctypes.c_int32, # kCFNumberSInt32Type
4: ctypes.c_int64, # kCFNumberSInt64Type
5: ctypes.c_float, # kCFNumberFloat32Type
6: ctypes.c_double, # kCFNumberFloat64Type
7: c_byte, # kCFNumberCharType
8: ctypes.c_short, # kCFNumberShortType
9: ctypes.c_int, # kCFNumberIntType
10: c_long, # kCFNumberLongType
11: ctypes.c_longlong, # kCFNumberLongLongType
12: ctypes.c_float, # kCFNumberFloatType
13: ctypes.c_double, # kCFNumberDoubleType
14: c_long, # kCFNumberCFIndexType
15: ctypes.c_int, # kCFNumberNSIntegerType
16: ctypes.c_double, # kCFNumberCGFloatType
}[type_]
output = c_type(0)
CoreFoundation.CFNumberGetValue(_cast_pointer_p(value), type_, byref(output))
return output.value
@staticmethod
def cf_dictionary_to_dict(dictionary):
"""
Converts a CFDictionary object into a python dictionary
:param dictionary:
The CFDictionary to convert
:return:
A python dict
"""
dict_length = CoreFoundation.CFDictionaryGetCount(dictionary)
keys = (CFTypeRef * dict_length)()
values = (CFTypeRef * dict_length)()
CoreFoundation.CFDictionaryGetKeysAndValues(
dictionary,
_cast_pointer_p(keys),
_cast_pointer_p(values)
)
output = {}
for index in range(0, dict_length):
output[CFHelpers.native(keys[index])] = CFHelpers.native(values[index])
return output
@classmethod
def native(cls, value):
"""
Converts a CF* object into its python equivalent
:param value:
The CF* object to convert
:return:
The native python object
"""
type_id = CoreFoundation.CFGetTypeID(value)
if type_id in cls._native_map:
return cls._native_map[type_id](value)
else:
return value
@staticmethod
def cf_string_to_unicode(value):
"""
Creates a python unicode string from a CFString object
:param value:
The CFString to convert
:return:
A python unicode string
"""
string = CoreFoundation.CFStringGetCStringPtr(
_cast_pointer_p(value),
kCFStringEncodingUTF8
)
if string is None:
buffer = buffer_from_bytes(1024)
result = CoreFoundation.CFStringGetCString(
_cast_pointer_p(value),
buffer,
1024,
kCFStringEncodingUTF8
)
if not result:
raise OSError('Error copying C string from CFStringRef')
string = byte_string_from_buffer(buffer)
if string is not None:
string = string.decode('utf-8')
return string
@staticmethod
def cf_string_from_unicode(string):
"""
Creates a CFStringRef object from a unicode string
:param string:
The unicode string to create the CFString object from
:return:
A CFStringRef
"""
return CoreFoundation.CFStringCreateWithCString(
CoreFoundation.kCFAllocatorDefault,
string.encode('utf-8'),
kCFStringEncodingUTF8
)
@staticmethod
def cf_data_to_bytes(value):
"""
Extracts a bytestring from a CFData object
:param value:
A CFData object
:return:
A byte string
"""
start = CoreFoundation.CFDataGetBytePtr(value)
num_bytes = CoreFoundation.CFDataGetLength(value)
return string_at(start, num_bytes)
@staticmethod
def cf_data_from_bytes(bytes_):
"""
Creates a CFDataRef object from a byte string
:param bytes_:
The data to create the CFData object from
:return:
A CFDataRef
"""
return CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault,
bytes_,
len(bytes_)
)
@staticmethod
def cf_dictionary_from_pairs(pairs):
"""
Creates a CFDictionaryRef object from a list of 2-element tuples
representing the key and value. Each key should be a CFStringRef and each
value some sort of CF* type.
:param pairs:
A list of 2-element tuples
:return:
A CFDictionaryRef
"""
length = len(pairs)
keys = []
values = []
for pair in pairs:
key, value = pair
keys.append(key)
values.append(value)
keys = (CFStringRef * length)(*keys)
values = (CFTypeRef * length)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
_cast_pointer_p(byref(keys)),
_cast_pointer_p(byref(values)),
length,
kCFTypeDictionaryKeyCallBacks,
kCFTypeDictionaryValueCallBacks
)
@staticmethod
def cf_array_from_list(values):
"""
Creates a CFArrayRef object from a list of CF* type objects.
:param values:
A list of CF* type object
:return:
A CFArrayRef
"""
length = len(values)
values = (CFTypeRef * length)(*values)
return CoreFoundation.CFArrayCreate(
CoreFoundation.kCFAllocatorDefault,
_cast_pointer_p(byref(values)),
length,
kCFTypeArrayCallBacks
)
@staticmethod
def cf_number_from_integer(integer):
"""
Creates a CFNumber object from an integer
:param integer:
The integer to create the CFNumber for
:return:
A CFNumber
"""
integer_as_long = c_long(integer)
return CoreFoundation.CFNumberCreate(
CoreFoundation.kCFAllocatorDefault,
kCFNumberCFIndexType,
byref(integer_as_long)
)
avg_line_length: 27.70122 | max_line_length: 112 | alphanum_fraction: 0.650525

hexsha: 795303cf33f28b3556462435158b835e5e917016 | size: 4,276 | ext: py | lang: Python
max_stars_repo: bp-momentum/BP-backend @ f6b4b344c2c5fae3c8bb17874771aa49a48e97ef | path: BPBackendDjango/BPBackendDjango/Views/leaderboardviews.py | licenses: ["MIT"] | stars: 3 (2022-03-15T09:56:31.000Z to 2022-03-15T09:56:59.000Z)
max_issues_repo: same repo, head, path and licenses | issues: 38 (2022-01-16T18:26:10.000Z to 2022-03-14T23:14:40.000Z)
max_forks_repo: same repo, head, path and licenses | forks: null
content:
import math
from rest_framework.views import APIView
from rest_framework.response import Response
from ..models import Leaderboard
from ..Helperclasses.jwttoken import JwToken
from ..Helperclasses.handlers import ErrorHandler, LeaderboardHandler
class ListLeaderboardView(APIView):
def post(self, request, *args, **kwargs):
        # check whether the leaderboard has already been reset this week
LeaderboardHandler.reset_leaderboard()
# checking if it contains all arguments
check = ErrorHandler.check_arguments(['Session-Token'], request.headers, ['count'], request.data)
if not check.get('valid'):
data = {
'success': False,
'description': 'Missing arguments',
'data': check.get('missing')
}
return Response(data)
req_data = dict(request.data)
token = JwToken.check_session_token(request.headers['Session-Token'])
if not token['valid']:
data = {
'success': False,
'description': 'Token is not valid',
'data': {}
}
return Response(data)
info = token['info']
leaderboard = Leaderboard.objects.order_by("-score")
out = []
rank = 0
count_of_entries = req_data['count']
count_entries = len(leaderboard)
user_index = 0
is_trainer = info["account_type"] == "trainer"
username = info["username"]
if not info['account_type'] == "user":
for i in range(0, count_of_entries):
if i >= count_entries:
continue
rank += 1
entry = LeaderboardHandler.build_entry(index=i, leaderboard=leaderboard, rank=rank, is_trainer=is_trainer,
username=username)
out.append(entry)
data = {
'success': True,
'description': 'Got the top count of users',
'data': {
"leaderboard": out
}
}
return Response(data)
for entry in leaderboard:
if entry.user.username == info['username']:
break
else:
user_index += 1
        # there are no more entries than requested, so return them all
if len(leaderboard) <= count_of_entries:
for i in range(len(leaderboard)):
rank += 1
entry = LeaderboardHandler.build_entry(index=i, leaderboard=leaderboard, rank=rank, is_trainer=is_trainer,
username=username)
out.append(entry)
        # user is within the top count_of_entries
elif user_index < math.floor(count_of_entries / 2):
for i in range(0, count_of_entries):
if i >= count_entries:
break
rank += 1
entry = LeaderboardHandler.build_entry(index=i, leaderboard=leaderboard, rank=rank, is_trainer=is_trainer,
username=username)
out.append(entry)
        # user is within the bottom count_of_entries
elif user_index > count_entries - math.ceil(count_of_entries / 2):
rank = count_entries - count_of_entries
for i in range(count_entries - count_of_entries, count_entries):
if i < 0:
continue
rank += 1
entry = LeaderboardHandler.build_entry(index=i, leaderboard=leaderboard, rank=rank, is_trainer=is_trainer,
username=username)
out.append(entry)
else:
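            # user is somewhere in the middle: centre a window of count_of_entries
            # entries on the user's position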
for i in range(user_index - math.floor(count_of_entries / 2), user_index + math.ceil(count_of_entries / 2)):
rank += 1
entry = LeaderboardHandler.build_entry(index=i, leaderboard=leaderboard, rank=rank, is_trainer=is_trainer,
username=username)
out.append(entry)
data = {
'success': True,
'description': 'The Leaderboard got listed',
'data': {
'leaderboard': out
}
}
return Response(data)
avg_line_length: 36.237288 | max_line_length: 122 | alphanum_fraction: 0.540225

hexsha: 79530478975ce2e4256ce556c0c0c7a8919ebc46 | size: 643 | ext: py | lang: Python
max_stars_repo: ionicsolutions/ytterbium @ 8cc6b4f942d7040e008ecf03f58b1a241800e74f | path: __init__.py | licenses: ["Apache-2.0"] | stars: 1 (2022-03-16T13:26:58.000Z to 2022-03-16T13:26:58.000Z)
max_issues_repo: same repo, head, path and licenses | issues: 1 (2017-12-18T12:06:10.000Z to 2017-12-20T17:11:21.000Z)
max_forks_repo: same repo, head, path and licenses | forks: null
content:
# -*- coding: utf-8 -*-
#
# (c) 2017 Kilian Kluge
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .parallelize import *
avg_line_length: 37.823529 | max_line_length: 76 | alphanum_fraction: 0.720062

hexsha: 79530569bb969510dc48cf4b0ea577ebffa66be8 | size: 1,382 | ext: py | lang: Python
max_stars_repo: valory-xyz/agents-aea @ 8f38efa96041b0156ed1ae328178e395dbabf2fc | path: aea/__init__.py | licenses: ["Apache-2.0"] | stars: 28 (2021-10-31T18:54:14.000Z to 2022-03-17T13:10:43.000Z)
max_issues_repo: same repo, head, path and licenses | issues: 66 (2021-10-31T11:55:48.000Z to 2022-03-31T06:26:23.000Z)
max_forks_repo: same repo, head, path and licenses | forks: null
content:
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Contains the AEA package."""
import inspect
import os
from packaging.version import Version
import aea.crypto # triggers registry population
from aea.__version__ import (
__author__,
__copyright__,
__description__,
__license__,
__title__,
__url__,
__version__,
)
from aea.crypto.plugin import load_all_plugins
AEA_DIR = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore
load_all_plugins()
def get_current_aea_version() -> Version:
"""Get current version."""
return Version(__version__)
avg_line_length: 28.204082 | max_line_length: 82 | alphanum_fraction: 0.642547

hexsha: 7953057edeafeb57009948e085a9239d610ebaa5 | size: 12,706 | ext: py | lang: Python
max_stars_repo: hainan-xv/kaldi @ 053a9f515fc6712d5da84ca35ab0802a1fd89588 | path: egs/wsj/s5/steps/tfrnnlm/train_lstm_fast.py | licenses: ["Apache-2.0"] | stars: 4 (2017-10-02T17:59:15.000Z to 2019-04-10T11:07:50.000Z)
max_issues_repo: same repo, head, path and licenses | issues: 8 (2017-01-05T18:00:41.000Z to 2017-11-09T19:08:02.000Z)
max_forks_repo: same repo, head, path and licenses | forks: 1 (2017-09-29T23:52:56.000Z to 2017-09-29T23:52:56.000Z)
content:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
# Copyright (C) 2017 Intellisist, Inc. (Author: Hainan Xu)
# 2018 Dongji Gao
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This script trains an LSTM RNNLM with TensorFlow using a different objf than
# cross-entropy, one that ensures the output of the LSTM is normalized and thus
# there is no need to normalize the output during test time, hence the name "fast".
# The objf is described in http://www.danielpovey.com/files/2018_icassp_rnnlm.pdf
# to call the script, do
# python steps/tfrnnlm/train_lstm_fast.py --data-path=$datadir \
# --save-path=$savepath --vocab-path=$rnn.wordlist [--hidden-size=$size]
#
# One example recipe is at egs/ami/s5/local/tfrnnlm/run_lstm_fast.sh
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import inspect
import time
import numpy as np
import tensorflow as tf
import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_integer("hidden-size", 200, "hidden dim of RNN")
flags.DEFINE_string("data-path", None,
"Where the training/test data is stored.")
flags.DEFINE_string("vocab-path", None,
"Where the wordlist file is stored.")
flags.DEFINE_string("save-path", None,
"Model output directory.")
flags.DEFINE_bool("use-fp16", False,
"Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS
class Config(object):
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 0.8
lr_decay = 0.8
batch_size = 64
def data_type():
return tf.float16 if FLAGS.use_fp16 else tf.float32
# This alternative "softmax" objective trains a "self-normalized" RNNLM in which
# the sum of the output is automatically (close to) 1.0, which saves a lot of
# computation during lattice rescoring.
# The details of this objective are described in http://www.danielpovey.com/files/2018_icassp_rnnlm.pdf
def new_softmax(labels, logits):
target = tf.reshape(labels, [-1])
f_logits = tf.exp(logits)
row_sums = tf.reduce_sum(f_logits, 1) # this is the negative part of the objf
t2 = tf.expand_dims(target, 1)
range = tf.expand_dims(tf.range(tf.shape(target)[0]), 1)
ind = tf.concat([range, t2], 1)
res = tf.gather_nd(logits, ind)
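  # Per-token loss: -logit(target) + sum_w exp(logit_w) - 1. Because log(Z) is
  # approximately Z - 1 near Z = 1, this tracks cross-entropy while pushing the
  # normalizer towards 1, so no explicit softmax is needed at test time.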
return -res + row_sums - 1
class RnnlmInput(object):
"""The input data."""
def __init__(self, config, data, name=None):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = reader.rnnlm_producer(
data, batch_size, num_steps, name=name)
class RnnlmModel(object):
"""The RNNLM model."""
def __init__(self, is_training, config, input_):
self._input = input_
batch_size = input_.batch_size
num_steps = input_.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
def lstm_cell():
# With the latest TensorFlow source code (as of Mar 27, 2017),
# the BasicLSTMCell will need a reuse parameter which is unfortunately not
# defined in TensorFlow 1.0. To maintain backwards compatibility, we add
# an argument check here:
if 'reuse' in inspect.getargspec(
tf.contrib.rnn.BasicLSTMCell.__init__).args:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=1.0, state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=1.0, state_is_tuple=True)
attn_cell = lstm_cell
if is_training and config.keep_prob < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=config.keep_prob)
self.cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
self._initial_state = self.cell.zero_state(batch_size, data_type())
self._initial_state_single = self.cell.zero_state(1, data_type())
self.initial = tf.reshape(tf.stack(axis=0, values=self._initial_state_single), [config.num_layers, 2, 1, size], name="lat_initial_state")
lat_word_in = tf.placeholder(tf.int32, [None, 1], name="lat_word_in")
lat_state_in = tf.placeholder(tf.float32, [config.num_layers, 2, None, size], name="lat_state_in")
# unpacking the input state context
l = tf.unstack(lat_state_in, axis=0)
lat_state_in_tuple = tuple(
[tf.contrib.rnn.LSTMStateTuple(l[idx][0],l[idx][1])
for idx in range(config.num_layers)]
)
with tf.device("/cpu:0"):
self.embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(self.embedding, input_.input_data)
lat_inputs = tf.nn.embedding_lookup(self.embedding, lat_word_in)
# test time
with tf.variable_scope("RNN"):
(lat_predicted_embedding_out, lat_state_out_tuple) = self.cell(lat_inputs[:, 0, :], lat_state_in_tuple)
lat_predicted_embedding_out = tf.reshape(lat_predicted_embedding_out, [-1, size], name="lat_predicted_embedding_out")
lat_state_out = tf.reshape(tf.stack(axis=0, values=lat_state_out_tuple), [config.num_layers, 2, -1, size], name="lat_state_out")
# above is the first part of the graph for lattice rescoring
# lat-word-in
# > ---- > lat-predicted-embedding-out
# lat-state-in > lat-state-out
# below is the second part of the graph for lattice rescoring
# lat-word-out
# > prob(word | lat-predicted-embedding-in)
# lat-predicted-embedding-in
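    # The explicit name= arguments on the lat_* tensors above and below
    # (lat_initial_state, lat_word_in, lat_state_in, lat_predicted_embedding_out,
    # lat_state_out, lat_word_out, lat_predicted_embedding_in, lat_out) define the
    # interface of the saved graph, so that the lattice-rescoring code can feed and
    # fetch these tensors by name.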
lat_word_out = tf.placeholder(tf.int32, [None, 1], name="lat_word_out")
lat_predicted_embedding_in = tf.placeholder(tf.float32, [None, size], name="lat_predicted_embedding_in")
lat_indices = tf.reshape(lat_word_out, [-1])
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
softmax_b = softmax_b - 10.0 # this helps prevent training instabilities
lat_softmax_w = tf.gather(softmax_w, lat_indices, axis=1)
lat_softmax_b = tf.gather(softmax_b, lat_indices)
lat_logits = tf.diag_part(tf.matmul(lat_predicted_embedding_in, lat_softmax_w)) + lat_softmax_b
lat_out = tf.reshape(lat_logits, [-1], name="lat_out")
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of models/tutorials/rnn/rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = tf.unstack(inputs, num=num_steps, axis=1)
# outputs, state = tf.contrib.rnn.static_rnn(
# cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
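        # Variables in the "RNN" scope were already created by the lattice-rescoring
        # call to self.cell above, so reuse is enabled from the very first time step
        # (hence "> -1" rather than the usual "> 0").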
if time_step > -1: tf.get_variable_scope().reuse_variables()
(predicted_embedding_output, state) = self.cell(inputs[:, time_step, :], state)
outputs.append(predicted_embedding_output)
output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(input_.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=data_type())],
softmax_loss_function=new_softmax)
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
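    # Training uses plain SGD with gradients clipped by global norm; the learning
    # rate is annealed from outside the graph via assign_lr() below.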
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
def get_config():
return Config()
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to RNNLM data directory")
raw_data = reader.rnnlm_raw_data(FLAGS.data_path, FLAGS.vocab_path)
train_data, valid_data, _, word_map = raw_data
config = get_config()
config.hidden_size = FLAGS.hidden_size
config.vocab_size = len(word_map)
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
train_input = RnnlmInput(config=config, data=train_data, name="TrainInput")
with tf.variable_scope("Model", reuse=None, initializer=initializer):
m = RnnlmModel(is_training=True, config=config, input_=train_input)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = RnnlmInput(config=config, data=valid_data, name="ValidInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mvalid = RnnlmModel(is_training=False, config=config, input_=valid_input)
tf.summary.scalar("Validation Loss", mvalid.cost)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, eval_op=m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path)
if __name__ == "__main__":
tf.app.run()
| 35.994334
| 141
| 0.678892
|
795305e0b4e181b7a67c9d73abc8b9d69aad5ade
| 927
|
py
|
Python
|
lib/surface/compute/instance_groups/managed/instance_configs/__init__.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/instance_groups/managed/instance_configs/__init__.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/instance_groups/managed/instance_configs/__init__.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:13:29.000Z
|
2020-07-24T20:13:29.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating managed instance groups."""
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class ManagedInstanceGroupsInstanceConfigs(base.Group):
"""Manage instance-specific settings of managed instance group."""
| 40.304348
| 74
| 0.785329
|
79530732b75cb8e36fb6bddf2a4b313079a633d9
| 1,195
|
py
|
Python
|
apps/news/handlers/create.py
|
hiraq/testcoil
|
f136ca6b3e9d0ce5da9f868ab63d9c7dda0f859e
|
[
"MIT"
] | null | null | null |
apps/news/handlers/create.py
|
hiraq/testcoil
|
f136ca6b3e9d0ce5da9f868ab63d9c7dda0f859e
|
[
"MIT"
] | null | null | null |
apps/news/handlers/create.py
|
hiraq/testcoil
|
f136ca6b3e9d0ce5da9f868ab63d9c7dda0f859e
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from sanic.response import json
from core.helpers import jsonapi
from apps.commons.errors import DataDuplicateError
from apps.news.models import News
from apps.news.repository import NewsRepo
from apps.news.services import CreateService
from apps.topics.models import Topic
from apps.topics.repository import TopicRepo
async def create(request):
response = {}
status = HTTPStatus.CREATED
repo = NewsRepo(News)
topicRepo = TopicRepo(Topic)
service = CreateService(request.json, repo, topicRepo)
try:
news = service.call()
response = {
'data': {
'id': str(news.id),
'type': 'news',
'attributes': {
'title': news.title,
'content': news.content,
'topics': list(map(lambda topic: topic.name, news.topics))
}
}
}
except DataDuplicateError as dup_err:
error = jsonapi.format_error(title='Data duplication', detail=dup_err.message)
response = jsonapi.return_an_error(error)
status = HTTPStatus.CONFLICT
return json(response, status=status)
| 29.875
| 87
| 0.629289
|
7953077804ddeebb41b5d12a2c0c4640b4ae4efa
| 8,071
|
py
|
Python
|
ARNet_ai2thor/faster_rcnn/datasets/ai2thor_attribute_dataset_loader.py
|
sdh9446/3D-SGG
|
1137aaa1e72a228c9208a4299d7b67c60e2a7222
|
[
"MIT"
] | null | null | null |
ARNet_ai2thor/faster_rcnn/datasets/ai2thor_attribute_dataset_loader.py
|
sdh9446/3D-SGG
|
1137aaa1e72a228c9208a4299d7b67c60e2a7222
|
[
"MIT"
] | null | null | null |
ARNet_ai2thor/faster_rcnn/datasets/ai2thor_attribute_dataset_loader.py
|
sdh9446/3D-SGG
|
1137aaa1e72a228c9208a4299d7b67c60e2a7222
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from PIL import Image
import os.path as osp
import numpy as np
import numpy.random as npr
import json
import cv2
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from fast_rcnn.config import cfg
class ai2thor_attribute_dataset(data.Dataset):
def __init__(self, set_option, image_set):
self._name = 'ai2thor_' + set_option + '_' + image_set
IMAGE_DATA_DIR = '/media/ailab/D/ai2thor'
DATA_DIR = './data'
self._image_set = image_set
self._data_path = osp.join(IMAGE_DATA_DIR, 'images')
self._set_option = set_option
# load category names and annotations
annotation_dir = DATA_DIR
cats = json.load(open(osp.join(annotation_dir, 'categories.json')))
class_weight = json.load(open(osp.join(annotation_dir, 'class_weights.json')))
self._object_classes = tuple(['__background__'] + cats['object'])
        self._color_classes = tuple(cats['color'])  # no separate background class
        self._open_state_classes = tuple(cats['open_state'])  # no separate background class
        # self._off_state_classes = tuple(cats['off_state'])  # no separate background class
self._object_class_to_ind = dict(list(zip(self.object_classes, list(range(self.num_object_classes)))))
self._color_class_to_ind = dict(list(zip(self.color_classes, list(range(self.num_color_classes))))) #
self._open_state_class_to_ind = dict(
list(zip(self.open_state_classes, list(range(self.num_open_state_classes))))) #
# self._off_state_class_to_ind = dict(zip(self.off_state_classes, xrange(self.num_off_state_classes))) #
self.class_weight_color = torch.ones(self.num_color_classes)
for idx in range(1, self.num_color_classes):
            if not self._color_classes[idx] in class_weight['color']:  # if this class has no weight, set it to 1
                self.class_weight_color[idx] = 1.  # this happens when the class is missing from train.json
                continue
            if self._color_classes[idx] not in ('unknown', 'white'):
                self.class_weight_color[idx] = class_weight['color'][self._color_classes[idx]]
            else:  # the 'unknown' weight is over 2000 so we just ignore it; 'white' is about 750, so ignore it too
                self.class_weight_color[idx] = 0.
self.class_weight_os = torch.ones(self.num_open_state_classes)
for idx in range(1, self.num_open_state_classes):
if not self._open_state_classes[idx] in class_weight['open_state']:
self.class_weight_os[idx] = 1.
continue
self.class_weight_os[idx] = class_weight['open_state'][self._open_state_classes[idx]]
ann_file_name = {
'ai2thor_normal_train': 'train.json',
'ai2thor_normal_test': 'test.json'
}
ann_file_path = osp.join(annotation_dir, ann_file_name[self.name])
self.annotations = json.load(open(ann_file_path))
self.tokenize_annotations()
# image transformation
        # normalize = transforms.Normalize(mean=[0.352, 0.418, 0.455], std=[0.155, 0.16, 0.162])  # old DB
        normalize = transforms.Normalize(mean=[0.319, 0.396, 0.452],
                                         std=[0.149, 0.155, 0.155])  # new DB of 3507 images (switched 181106)
self.transform = transforms.Compose([
transforms.ToTensor()
])
# self.transform = transforms.Compose([
# transforms.ToTensor(),
# normalize,
# ])
def __getitem__(self, index):
'''
        #### for testing only !!! ###
for idx, ann in enumerate(self.annotations):
if ann['path'] == '0_FloorPlan1.jpg':
index = idx
break
#####################
'''
        # For one image this returns:
        #   img            - transformed image tensor
        #   im_info        - (height, width, im_scale)
        #   objects        - (N, 5) tensor of scaled boxes plus object class index
        #   gt_colors, gt_open_states - per-object attribute labels
# Sample random scales to use for each image in this batch
target_scale = cfg.TRAIN.SCALES[npr.randint(0, high=len(cfg.TRAIN.SCALES))]
img = cv2.imread(osp.join(self._data_path, self.annotations[index]['path'])) # 0~255
#print(osp.join(self._data_path, self.annotations[index]['path']))
try:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
except:
print(osp.join(self._data_path, self.annotations[index]['path']))
assert False, 'no file'
img, im_scale = self._image_resize(img, target_scale, cfg.TRAIN.MAX_SIZE) # 0~255
im_info = np.array([img.shape[0], img.shape[1], im_scale], dtype=np.float32)
img = Image.fromarray(img) # 0~1
if self.transform is not None:
img = self.transform(img) # convert to Tensor
_annotation = self.annotations[index]
objects = torch.zeros((len(_annotation['objects']), 5))
objects[:, 0:4] = torch.FloatTensor([obj['box'] for obj in _annotation['objects']]) * im_scale
objects[:, 4] = torch.FloatTensor([obj['class'] for obj in _annotation['objects']])
gt_colors = torch.LongTensor([obj['color'] for obj in _annotation['objects']])
# print(gt_colors.size()) ##
gt_open_states = torch.LongTensor([obj['open_state'] for obj in _annotation['objects']])
# print(gt_open_states.size()) ##
return img, im_info, objects, gt_colors, gt_open_states
def __len__(self):
return len(self.annotations)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(i)
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# Example image path for index=119993:
# images/train2014/COCO_train2014_000000119993.jpg
file_name = self.annotations[index]['path']
image_path = osp.join(self._data_path, file_name)
assert osp.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def tokenize_annotations(self):
counter = 0
# print 'Tokenizing annotations...'
for im in self.annotations:
for obj in im['objects']:
obj['class'] = self._object_class_to_ind[obj['class']]
obj['color'] = self._color_class_to_ind[obj['color']]
obj['open_state'] = self._open_state_class_to_ind[obj['open_state']]
# obj['off_state'] = self._off_state_class_to_ind[obj['off_state']]
def _image_resize(self, im, target_size, max_size):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
@property
def name(self):
return self._name
@property
def num_object_classes(self):
return len(self._object_classes)
@property
def num_color_classes(self): #
return len(self._color_classes)
@property
def num_open_state_classes(self): #
return len(self._open_state_classes)
# @property
# def num_off_state_classes(self): #
# return len(self._off_state_classes)
@property
def object_classes(self):
return self._object_classes
@property
def color_classes(self): #
return self._color_classes
@property
def open_state_classes(self): #
return self._open_state_classes
# @property
# def off_state_classes(self): #
# return self._off_state_classes
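# A minimal usage sketch (assuming the annotation files and image directory
# configured above exist; the arguments follow the keys of ann_file_name):
#
#   dataset = ai2thor_attribute_dataset('normal', 'train')
#   img, im_info, objects, gt_colors, gt_open_states = dataset[0]
#
# Each item carries a variable number of objects, so wrapping this in a torch
# DataLoader needs batch_size=1 or a custom collate_fn.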
| 38.990338
| 112
| 0.623962
|
795308154c5f335c016af92ad6467fecf17f3cf0
| 557
|
py
|
Python
|
api/open_general_licences/migrations/0002_auto_20200522_1410.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | null | null | null |
api/open_general_licences/migrations/0002_auto_20200522_1410.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | null | null | null |
api/open_general_licences/migrations/0002_auto_20200522_1410.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.12 on 2020-05-22 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("open_general_licences", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="opengenerallicence",
name="countries",
field=models.ManyToManyField(related_name="OpenGeneralLicence", to="countries.Country"),
),
migrations.AlterField(model_name="opengenerallicence", name="url", field=models.URLField(),),
]
| 27.85
| 101
| 0.657092
|
795308e0f33a5f7bca390db630aa8beadd59f72e
| 1,076
|
py
|
Python
|
app/__init__.py
|
zhangchenchen/A-clean-blog-by-flask-bootstrap
|
16f7da63efdf07780bc65b42087574463c3acd51
|
[
"Apache-2.0"
] | 1
|
2017-12-24T10:12:03.000Z
|
2017-12-24T10:12:03.000Z
|
app/__init__.py
|
zhangchenchen/A-clean-blog-by-flask-bootstrap
|
16f7da63efdf07780bc65b42087574463c3acd51
|
[
"Apache-2.0"
] | null | null | null |
app/__init__.py
|
zhangchenchen/A-clean-blog-by-flask-bootstrap
|
16f7da63efdf07780bc65b42087574463c3acd51
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, render_template
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
from config import config
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
bootstrap = Bootstrap()
moment = Moment()
mail = Mail()
db = SQLAlchemy()
pagedown = PageDown()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
mail.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app
| 25.023256
| 62
| 0.757435
|
79530aaf3e7601129f2f739babc977be8578e37c
| 1,491
|
py
|
Python
|
dex/test/test.py
|
qintangtao/python.dex
|
1dca5d45c8a1ffdd6332ae9759fceb996587d0b6
|
[
"MIT"
] | 291
|
2015-01-09T16:15:00.000Z
|
2022-01-17T20:18:02.000Z
|
dex/test/test.py
|
qintangtao/python.dex
|
1dca5d45c8a1ffdd6332ae9759fceb996587d0b6
|
[
"MIT"
] | 14
|
2015-01-23T14:08:39.000Z
|
2019-06-11T02:33:51.000Z
|
dex/test/test.py
|
qintangtao/python.dex
|
1dca5d45c8a1ffdd6332ae9759fceb996587d0b6
|
[
"MIT"
] | 41
|
2015-01-31T12:33:59.000Z
|
2020-06-27T03:21:33.000Z
|
################################################################################
#
# Copyright (c) 2012 ObjectLabs Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import unittest
from test_dex import test_dex
all_suites = [ unittest.TestLoader().loadTestsFromTestCase(test_dex) ]
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(all_suites))
| 46.59375
| 80
| 0.696177
|
79530bad52f159ca7e354545eaaeaa0f45cc55ad
| 1,842
|
py
|
Python
|
homeassistant/components/binary_sensor/nest.py
|
instantchow/home-assistant
|
6797365d4fd74328a0c9e961f652cfb37f48bc7d
|
[
"MIT"
] | null | null | null |
homeassistant/components/binary_sensor/nest.py
|
instantchow/home-assistant
|
6797365d4fd74328a0c9e961f652cfb37f48bc7d
|
[
"MIT"
] | null | null | null |
homeassistant/components/binary_sensor/nest.py
|
instantchow/home-assistant
|
6797365d4fd74328a0c9e961f652cfb37f48bc7d
|
[
"MIT"
] | null | null | null |
"""
Support for Nest Thermostat Binary Sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.nest/
"""
import logging
import socket
import homeassistant.components.nest as nest
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.sensor.nest import NestSensor
DEPENDENCIES = ['nest']
BINARY_TYPES = ['fan',
'hvac_ac_state',
'hvac_aux_heater_state',
'hvac_heater_state',
'hvac_heat_x2_state',
'hvac_heat_x3_state',
'hvac_alt_heat_state',
'hvac_alt_heat_x2_state',
'hvac_emer_heat_state',
'online']
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup Nest binary sensors."""
logger = logging.getLogger(__name__)
try:
for structure in nest.NEST.structures:
for device in structure.devices:
for variable in config['monitored_conditions']:
if variable in BINARY_TYPES:
add_devices([NestBinarySensor(structure,
device,
variable)])
else:
logger.error('Nest sensor type: "%s" does not exist',
variable)
except socket.error:
logger.error(
"Connection error logging into the nest web service."
)
class NestBinarySensor(NestSensor, BinarySensorDevice):
"""Represents a Nest binary sensor."""
@property
def is_on(self):
"""True if the binary sensor is on."""
return bool(getattr(self.device, self.variable))
| 34.111111
| 77
| 0.584148
|
79530cc4b963fe5449406b92a773f6dbc2d39f0a
| 3,080
|
py
|
Python
|
Android/NDK/android-ndk-r20b-win/prebuilt/windows-x86_64/share/gdb/system-gdbinit/elinos.py
|
X018/CCTOOL
|
989af4d7edab82bf540400eb72eca4e7447d722c
|
[
"MIT"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
Android/NDK/android-ndk-r20b-win/prebuilt/windows-x86_64/share/gdb/system-gdbinit/elinos.py
|
X018/CCTOOL
|
989af4d7edab82bf540400eb72eca4e7447d722c
|
[
"MIT"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
Android/NDK/android-ndk-r20b-win/prebuilt/windows-x86_64/share/gdb/system-gdbinit/elinos.py
|
X018/CCTOOL
|
989af4d7edab82bf540400eb72eca4e7447d722c
|
[
"MIT"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright (C) 2011-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configure GDB using the ELinOS environment."""
import os
import glob
import gdb
def warn(msg):
print "warning: %s" % msg
def get_elinos_environment():
"""Return the ELinOS environment.
If the ELinOS environment is properly set up, return a dictionary
which contains:
* The path to the ELinOS project at key 'project';
* The path to the ELinOS CDK at key 'cdk';
* The ELinOS target name at key 'target' (Eg. 'i486-linux');
* A list of Xenomai install prefixes (which could be empty, if
the ELinOS project does not include Xenomai) at key 'xenomai'.
If one of these cannot be found, print a warning; the corresponding
value in the returned dictionary will be None.
"""
result = {}
for key in ("project", "cdk", "target"):
var = "ELINOS_" + key.upper()
if var in os.environ:
result[key] = os.environ[var]
else:
warn("%s not set" % var)
result[key] = None
if result["project"] is not None:
result["xenomai"] = glob.glob(result["project"] + "/xenomai-[0-9.]*")
else:
result["xenomai"] = []
return result
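# Example of a fully populated result (the paths are hypothetical):
#   {'project': '/home/user/proj', 'cdk': '/opt/elinos/cdk',
#    'target': 'i486-linux', 'xenomai': ['/home/user/proj/xenomai-2.6.3']}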
def elinos_init():
"""Initialize debugger environment for ELinOS.
Let the debugger know where to find the ELinOS libraries on host. This
assumes that an ELinOS environment is properly set up. If some environment
variables are missing, warn about which library may be missing.
"""
elinos_env = get_elinos_environment()
solib_dirs = []
# System libraries
if None in (elinos_env[key] for key in ("cdk", "target")):
warn("ELinOS system libraries will not be loaded")
else:
solib_prefix = "%s/%s" % (elinos_env["cdk"], elinos_env["target"])
solib_dirs += ["%s/%s" % (solib_prefix, "lib")]
gdb.execute("set solib-absolute-prefix %s" % solib_prefix)
# Xenomai libraries. Those are optional, so have a lighter warning
# if they cannot be located.
if elinos_env["project"] is None:
warn("Xenomai libraries may not be loaded")
else:
for dir in elinos_env['xenomai']:
solib_dirs += ["%s/%s"
% (dir, "xenomai-build/usr/realtime/lib")]
if len(solib_dirs) != 0:
gdb.execute("set solib-search-path %s" % ":".join(solib_dirs))
if __name__ == "__main__":
elinos_init()
| 33.478261
| 78
| 0.653571
|
79530d9dc9201209e6f12a5d435fd48828fa0427
| 3,595
|
py
|
Python
|
tests/ssh/test_ssh_protocol.py
|
vrtdev/bless
|
cdcc506bf7cf7b71883bc459b9be190ac6b92638
|
[
"Apache-2.0"
] | 1
|
2018-06-28T08:13:04.000Z
|
2018-06-28T08:13:04.000Z
|
tests/ssh/test_ssh_protocol.py
|
vrtdev/bless
|
cdcc506bf7cf7b71883bc459b9be190ac6b92638
|
[
"Apache-2.0"
] | 3
|
2017-03-06T21:34:12.000Z
|
2017-12-11T19:03:43.000Z
|
tests/ssh/test_ssh_protocol.py
|
vrtdev/bless
|
cdcc506bf7cf7b71883bc459b9be190ac6b92638
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from bless.ssh.protocol.ssh_protocol import pack_ssh_mpint, _hex_characters_length, \
pack_ssh_uint32, pack_ssh_uint64, pack_ssh_string
def test_strings():
strings = {'': '00000000'.decode('hex'), u'abc': '00000003616263'.decode('hex'),
b'1234': '0000000431323334'.decode('hex'), '1234': '0000000431323334'.decode('hex')}
for known_input, known_answer in strings.iteritems():
assert known_answer == pack_ssh_string(known_input)
def test_mpint_known_answers():
# mipint values are from https://www.ietf.org/rfc/rfc4251.txt
mpints = {long(0): '00000000'.decode('hex'),
long(0x9a378f9b2e332a7): '0000000809a378f9b2e332a7'.decode('hex'),
long(0x80): '000000020080'.decode('hex'), long(-0x1234): '00000002edcc'.decode('hex'),
long(-0xdeadbeef): '00000005ff21524111'.decode('hex')}
for known_input, known_answer in mpints.iteritems():
assert known_answer == pack_ssh_mpint(known_input)
def test_mpints():
mpints = {long(-1): '00000001ff'.decode('hex'), long(1): '0000000101'.decode('hex'),
long(127): '000000017f'.decode('hex'), long(128): '000000020080'.decode('hex'),
long(-128): '0000000180'.decode('hex'), long(-129): '00000002ff7f'.decode('hex'),
long(255): '0000000200ff'.decode('hex'), long(256): '000000020100'.decode('hex'),
long(-256): '00000002ff00'.decode('hex'), long(-257): '00000002feff'.decode('hex')}
for known_input, known_answer in mpints.iteritems():
assert known_answer == pack_ssh_mpint(known_input)
def test_hex_characters_length():
digits = {0: 0, 1: 2, 64: 2, 127: 2, 128: 4, 16384: 4, 32767: 4, 32768: 6, -1: 2,
long(-0x1234): 4, long(-0xdeadbeef): 10, -128: 2}
for known_input, known_answer in digits.iteritems():
assert known_answer == _hex_characters_length(known_input)
def test_uint32():
uint32s = {0x00: '00000000'.decode('hex'), 0x0a: '0000000a'.decode('hex'),
0xab: '000000ab'.decode('hex'), 0xabcd: '0000abcd'.decode('hex'),
0xabcdef: '00abcdef'.decode('hex'), 0xffffffff: 'ffffffff'.decode('hex'),
0xf0f0f0f0: 'f0f0f0f0'.decode('hex'), 0x0f0f0f0f: '0f0f0f0f'.decode('hex')}
for known_input, known_answer in uint32s.iteritems():
assert known_answer == pack_ssh_uint32(known_input)
def test_uint64():
uint64s = {0x00: '0000000000000000'.decode('hex'), 0x0a: '000000000000000a'.decode('hex'),
0xab: '00000000000000ab'.decode('hex'), 0xabcd: '000000000000abcd'.decode('hex'),
0xabcdef: '0000000000abcdef'.decode('hex'),
0xffffffff: '00000000ffffffff'.decode('hex'),
0xf0f0f0f0: '00000000f0f0f0f0'.decode('hex'),
0x0f0f0f0f: '000000000f0f0f0f'.decode('hex'),
0xf0f0f0f000000000: 'f0f0f0f000000000'.decode('hex'),
0x0f0f0f0f00000000: '0f0f0f0f00000000'.decode('hex'),
0xffffffffffffffff: 'ffffffffffffffff'.decode('hex')}
for known_input, known_answer in uint64s.iteritems():
assert known_answer == pack_ssh_uint64(known_input)
def test_floats():
with pytest.raises(TypeError):
pack_ssh_uint64(4.2)
with pytest.raises(TypeError):
pack_ssh_uint32(4.2)
def test_uint_too_long():
with pytest.raises(ValueError):
pack_ssh_uint64(0x1FFFFFFFFFFFFFFFF)
with pytest.raises(ValueError):
pack_ssh_uint32(long(0x1FFFFFFFF))
with pytest.raises(ValueError):
pack_ssh_uint32(int(0x1FFFFFFFF))
| 43.313253
| 100
| 0.651182
|
79530dba1f819e9200d230e72e5a216260d5fd4f
| 431
|
py
|
Python
|
main.py
|
RiyanDcosta/Python_Globals
|
12be4baebc00d9cd5a51493d4eddcef307a6fd3f
|
[
"BSD-3-Clause"
] | 1
|
2021-02-02T04:31:45.000Z
|
2021-02-02T04:31:45.000Z
|
main.py
|
RiyanDcosta/Python_Globals
|
12be4baebc00d9cd5a51493d4eddcef307a6fd3f
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
RiyanDcosta/Python_Globals
|
12be4baebc00d9cd5a51493d4eddcef307a6fd3f
|
[
"BSD-3-Clause"
] | 1
|
2021-02-02T04:31:50.000Z
|
2021-02-02T04:31:50.000Z
|
# Copyright (c) <year>, <copyright holder>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from classes import T1, T2
import time
def exec_func():
t1 = T1()
t2 = T2()
t1.get()
time.sleep(1)
t2.get()
time.sleep(1)
t1.get()
if __name__ == '__main__':
exec_func()
| 17.24
| 72
| 0.605568
|
79530dbe22c1fd9f9495f321129088a815727720
| 1,974
|
py
|
Python
|
ThesisAnalysis/scripts/reduction/spe_spectrum.py
|
watsonjj/ThesisAnalysis
|
5bfae5700dd953fe5f44c56a0cf0a34241dd4c17
|
[
"BSD-3-Clause"
] | null | null | null |
ThesisAnalysis/scripts/reduction/spe_spectrum.py
|
watsonjj/ThesisAnalysis
|
5bfae5700dd953fe5f44c56a0cf0a34241dd4c17
|
[
"BSD-3-Clause"
] | null | null | null |
ThesisAnalysis/scripts/reduction/spe_spectrum.py
|
watsonjj/ThesisAnalysis
|
5bfae5700dd953fe5f44c56a0cf0a34241dd4c17
|
[
"BSD-3-Clause"
] | null | null | null |
from ThesisAnalysis import get_data, ThesisHDF5Writer
from ThesisAnalysis.files import spe_files
import numpy as np
import pandas as pd
from CHECLabPy.core.io import DL1Reader
from CHECLabPy.spectrum_fitters.gentile import GentileFitter
import warnings
from pandas.errors import PerformanceWarning
def process(file):
name = file.__class__.__name__
input_paths = file.spe_files
config_path = file.spe_config_path
poi = file.poi
output_path = get_data("spe/{}_spe_spectrum.h5".format(name))
readers = [DL1Reader(path) for path in input_paths]
n_illuminations = len(readers)
mapping = readers[0].mapping
fitter = GentileFitter(n_illuminations, config_path)
charges = []
for reader in readers:
pixel, charge = reader.select_columns(['pixel', 'charge'])
if poi != -1:
charge_p = charge[pixel == poi]
else:
charge_p = charge
charges.append(charge_p)
fitter.apply(*charges)
fitx = np.linspace(fitter.range[0], fitter.range[1], 1000)
coeff = fitter.coeff.copy()
errors = fitter.errors.copy()
d = dict(
edges=fitter.edges,
between=fitter.between,
fitx=fitx
)
for i in range(n_illuminations):
d["hist{}".format(i)] = fitter.hist[i]
d["fit{}".format(i)] = fitter.fit_function(fitx, **coeff)[i]
df_array = pd.DataFrame([d])
df_coeff = pd.DataFrame(coeff, index=[0])
df_errors = pd.DataFrame(errors, index=[0])
with warnings.catch_warnings():
warnings.simplefilter('ignore', PerformanceWarning)
with ThesisHDF5Writer(output_path) as writer:
writer.write(
array=df_array,
coeff=df_coeff,
errors=df_errors,
)
writer.write_mapping(mapping)
writer.write_metadata(n_illuminations=n_illuminations)
def main():
[process(f) for f in spe_files]
if __name__ == '__main__':
main()
| 28.608696
| 68
| 0.651469
|
79531013fba593787a179c8562b8f3a2fdf04bbb
| 1,037
|
py
|
Python
|
onnxruntime/__init__.py
|
csteegz/onnxruntime
|
a36810471b346ec862ac6e4de7f877653f49525e
|
[
"MIT"
] | 1
|
2020-07-12T15:23:49.000Z
|
2020-07-12T15:23:49.000Z
|
onnxruntime/__init__.py
|
ajinkya933/onnxruntime
|
0e799a03f2a99da6a1b87a2cd37facb420c482aa
|
[
"MIT"
] | null | null | null |
onnxruntime/__init__.py
|
ajinkya933/onnxruntime
|
0e799a03f2a99da6a1b87a2cd37facb420c482aa
|
[
"MIT"
] | 1
|
2020-09-09T06:55:51.000Z
|
2020-09-09T06:55:51.000Z
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
"""
ONNX Runtime
enables high-performance evaluation of trained machine learning (ML)
models while keeping resource usage low.
Building on Microsoft's dedication to the
`Open Neural Network Exchange (ONNX) <https://onnx.ai/>`_
community, it supports traditional ML models as well
as Deep Learning algorithms in the
`ONNX-ML format <https://github.com/onnx/onnx/blob/master/docs/IR.md>`_.
"""
__version__ = "0.5.0"
__author__ = "Microsoft"
from onnxruntime.capi import onnxruntime_validation
onnxruntime_validation.check_distro_info()
from onnxruntime.capi.session import InferenceSession
from onnxruntime.capi._pybind_state import get_all_providers, get_available_providers, get_device, RunOptions, SessionOptions, set_default_logger_severity, NodeArg, ModelMetadata, GraphOptimizationLevel
| 47.136364
| 202
| 0.700096
|
7953119df8487c9daf0617cf940da3c546c51bc0
| 994
|
py
|
Python
|
manage.py
|
wooyek/secure-share
|
b3b1dd6a03dc278e881e866a5554254523d33a81
|
[
"MIT"
] | null | null | null |
manage.py
|
wooyek/secure-share
|
b3b1dd6a03dc278e881e866a5554254523d33a81
|
[
"MIT"
] | 1
|
2020-05-01T10:53:25.000Z
|
2020-05-01T10:53:25.000Z
|
manage.py
|
wooyek/secure-share
|
b3b1dd6a03dc278e881e866a5554254523d33a81
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import logging
import os
import sys
from pathlib import Path
logging.basicConfig(
format='%(asctime)s %(levelname)-7s %(thread)-5d %(filename)s:%(lineno)s | %(funcName)s | %(message)s',
# format='%(asctime)s %(levelname)-7s %(thread)-5d %(name)s %(pathname)s:%(lineno)s | %(funcName)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
logging.getLogger().setLevel(logging.DEBUG)
logging.disable(logging.NOTSET)
logging.getLogger("PIL.Image").setLevel(logging.WARNING)
logging.getLogger('environ').setLevel(logging.INFO)
logging.debug("Importing: %s" % __file__)
SRC_PATH = str(Path(__file__).parent / 'src')
if __name__ == "__main__":
if SRC_PATH not in sys.path:
sys.path.append(SRC_PATH)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 34.275862
| 118
| 0.716298
|
795311c0283eb8c807d539e54cb603b1dc5c2382
| 117,658
|
py
|
Python
|
theano/gpuarray/elemwise.py
|
michaelosthege/aesara
|
55c88832ba71f87c9612d573ede74a4c042ef570
|
[
"BSD-3-Clause"
] | null | null | null |
theano/gpuarray/elemwise.py
|
michaelosthege/aesara
|
55c88832ba71f87c9612d573ede74a4c042ef570
|
[
"BSD-3-Clause"
] | null | null | null |
theano/gpuarray/elemwise.py
|
michaelosthege/aesara
|
55c88832ba71f87c9612d573ede74a4c042ef570
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
from io import StringIO
import numpy as np
from theano import scalar
from theano.gof.graph import Apply
from theano.gof.op import Op
from theano.gof.utils import MethodNotDefined
from theano.link.c.interface import HideC
from theano.scalar import Composite, Scalar
from theano.scalar.basic import complex_types, upgrade_to_float_no_complex
from theano.scalar.basic_scipy import Erfcinv, Erfinv
from theano.tensor.elemwise import CAReduceDtype, DimShuffle, Elemwise
try:
import pygpu
from pygpu import gpuarray
from pygpu.gpuarray import dtype_to_typecode
from pygpu.reduction import ReductionKernel
from pygpu.tools import ArrayArg
except ImportError:
pass
from .basic_ops import GpuKernelBase, Kernel, as_gpuarray_variable, infer_context_name
from .fp16_help import load_w, write_w
from .type import GpuArrayType, gpu_context_type
def make_argument(v, name):
return ArrayArg(np.dtype(v.type.dtype), name)
def as_C_string_const(s):
return "\n".join('"%s\\n"' % (l.replace('"', '\\"')) for l in s.split("\n"))
def get_scal(dt):
if dt == "float16":
dt = "float32"
return scalar.get_scalar_type(dt)
def max_inputs_to_GpuElemwise(node_or_outputs):
"""
Compute the maximum number of inputs that fit in a kernel call.
"""
if isinstance(node_or_outputs, Apply):
outputs = node_or_outputs.outputs
else:
outputs = node_or_outputs
n_out = len(outputs)
ndim = outputs[0].type.ndim
ptr_size = 8
# Even with call32, the interface does not change, and shapes,
# strides, and offset are passed as 64-bits (8 bytes)
int_size = 8
# we take the limit from CUDA for now
nb_bytes_total = 4096
# Regardless of the number of arguments, we have:
# - The total number of elements (int)
# - The shape (int) on each dimension
fixed_size = int_size + int_size * ndim
# Each argument (input or output) has:
# - 1 pointer (ptr)
# - 1 offset (int)
# - 1 stride (int) per dimension
# Even if the tensor ends up being contiguous, code for the
# non-contiguous case still needs to be generated.
param_size = ptr_size + int_size + int_size * ndim
# Remaining for inputs
nb_bytes_for_inputs = nb_bytes_total - fixed_size - param_size * n_out
# Maximum number of inputs
max_nb_inputs = nb_bytes_for_inputs // param_size
return max_nb_inputs
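# For example, for a node with a single 4-d output: fixed_size = 8 + 8*4 = 40 bytes,
# param_size = 8 + 8 + 8*4 = 48 bytes, so at most
# (4096 - 40 - 48) // 48 = 83 inputs fit in one kernel call.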
class GpuElemwise(HideC, Elemwise):
"""
Elemwise on the GPU.
"""
params_type = gpu_context_type
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
_f16_ok = True
def __str__(self):
if self.name is not None:
return self.name
items = str(sorted(self.inplace_pattern.items()))
return f"GpuElemwise{{{self.scalar_op}}}{items}<gpuarray>"
def max_inputs(self, node_or_outputs):
return max_inputs_to_GpuElemwise(node_or_outputs)
def make_node(self, *inputs):
ctx_name = infer_context_name(*inputs)
inputs = [as_gpuarray_variable(i, ctx_name) for i in inputs]
out_info = Elemwise.get_output_info(self, GpuDimShuffle, *inputs)
inputs = out_info[2]
outputs = [
GpuArrayType(broadcastable=br, context_name=ctx_name, dtype=dtype)()
for dtype, br in zip(out_info[0], out_info[1])
]
if len(outputs) > 1:
raise NotImplementedError()
if len(inputs) > max_inputs_to_GpuElemwise(outputs):
raise NotImplementedError(
"Can not make this GpuElemwise with that much inputs"
)
# Try to generate the kernel to catch SupportCodeErrors
scal_ins = [get_scal(i.dtype) for i in inputs]
fake_node = self.scalar_op.make_node(*[i() for i in scal_ins])
try:
code = fake_node.op.c_support_code_apply(fake_node, "test")
if code:
raise SupportCodeError(code)
except MethodNotDefined:
pass
try:
support_code = fake_node.op.c_support_code()
if "struct" in support_code:
# The macro is fine, the C++ struct is not.
raise SupportCodeError(
"struct aren't supported in GpuElemwise support_code" + support_code
)
except MethodNotDefined:
pass
node = Apply(self, inputs, outputs)
return node
def get_params(self, node):
return node.inputs[0].type.context
def _get_vnames(self, node):
inps = [f"i{n}" for n, _ in enumerate(node.inputs)]
outs = [
f"o{n}" if n not in self.inplace_pattern else inps[self.inplace_pattern[n]]
for n, _ in enumerate(node.outputs)
]
return inps, outs
def _generate_op_string(self, node):
inps, outs = self._get_vnames(node)
scal_v_ins = [get_scal(i.dtype)() for i in node.inputs]
# As float16 isn't a c type and most GPU don't compute on it,
# We convert the computation to float32, and let libgpuarray
# load in float16 and cast to float32 and do the reverse for
# the output.
scalar_op = self.scalar_op
if isinstance(scalar_op, (scalar.Cast, Composite)):
scalar_op = scalar_op.clone_float32()
fake_node = scalar_op.make_node(*scal_v_ins)
scal_v_out = fake_node.outputs
assert len(scal_v_out) == len(node.outputs)
try:
kop = fake_node.op.c_code(
fake_node, "elem_scalar", inps, outs, dict(fail="return;")
)
except MethodNotDefined:
raise AssertionError(
"No c code for this scalar. Can not make a GpuElemwise"
)
# If the following assert fail, then we need to update the
# code handler above.
assert "npy_float16" not in kop
support_code = ""
try:
# We accept only some c_support_code().
# This filter is done in the make_node()
support_code += fake_node.op.c_support_code()
except MethodNotDefined:
pass
for npy, ga in [
("npy_bool", "ga_bool"),
("npy_uint8", "ga_ubyte"),
("npy_uint16", "ga_ushort"),
("npy_uint32", "ga_uint"),
("npy_uint64", "ga_ulong"),
("npy_int8", "ga_byte"),
("npy_int16", "ga_short"),
("npy_int32", "ga_int"),
("npy_int64", "ga_long"),
("npy_float16", "ga_half"),
("npy_float32", "ga_float"),
("npy_float64", "ga_double"),
]:
kop = kop.replace(npy, ga)
return support_code, kop
def c_headers(self):
return ["<numpy_compat.h>", "<gpuarray/types.h>", "<gpuarray/elemwise.h>"]
def c_support_code_struct(self, node, name):
return "\nGpuElemwise *ge;\n"
def c_init_code_struct(self, node, name, sub):
inps, outs = self._get_vnames(node)
nargs = len(inps) + len(outs) - len(self.inplace_pattern)
support_code, kop = self._generate_op_string(node)
res = """
gpuelemwise_arg args[%(nargs)s] = {{0}};
""" % dict(
nargs=nargs
)
for n, (i, name) in enumerate(zip(node.inputs, inps)):
res += """
args[%(n)s].name = %(name)s;
args[%(n)s].typecode = %(typecode)s;
args[%(n)s].flags = GE_READ;
""" % dict(
n=n, name='"{}"'.format(name), typecode=i.type.typecode
)
p = len(inps)
for n, o in enumerate(node.outputs):
if n in self.inplace_pattern:
assert len(node.outputs) == 1
res += "\nargs[%(n)s].flags |= GE_WRITE;\n" % dict(
n=self.inplace_pattern[n]
)
else:
res += """
args[%(n)s].name = %(name)s;
args[%(n)s].typecode = %(typecode)s;
args[%(n)s].flags = GE_WRITE;
""" % dict(
n=p, name='"{}"'.format(outs[n]), typecode=o.type.typecode
)
p += 1
res += """
ge = GpuElemwise_new(%(ctx)s->ctx, %(support)s, %(kop)s, %(nargs)s, args, %(nd)s, GE_CONVERT_F16);
if (ge == NULL) {
PyErr_SetString(PyExc_RuntimeError, "Could not initialize elemwise support");
%(fail)s
}
""" % dict(
nargs=nargs,
ctx=sub["params"],
fail=sub["fail"],
support=as_C_string_const(support_code),
kop=as_C_string_const(kop),
nd=node.inputs[0].ndim,
)
return res
def c_cleanup_code_struct(self, node, name):
return """
GpuElemwise_free(ge);
"""
def c_code(self, node, name, inputs, outputs, sub):
nd = node.outputs[0].ndim
fail = sub["fail"]
initial_dims = ",".join("1" for i in range(nd))
opname = str(self.scalar_op)
ctx = sub["params"]
nargs = len(node.inputs) + len(node.outputs) - len(self.inplace_pattern)
# check that all inputs have valid dimensions
emitted_inames = {}
code = (
"""
// +1 is so that MSVC is happy when nd == 0
size_t dims[%(nd)s+1] = {%(initial_dims)s};
void *rargs[%(nargs)s] = {0};
int err;
"""
% locals()
)
for idx, iname in enumerate(inputs):
if iname in emitted_inames:
assert emitted_inames[iname] is node.inputs[idx]
continue
broadcasts = map(int, node.inputs[idx].broadcastable)
broadcasts = ", ".join(map(str, broadcasts))
nd = node.inputs[idx].ndim
code += (
"""
int broadcasts_%(iname)s[%(nd)s+1] = {%(broadcasts)s};
"""
% locals()
)
emitted_inames[iname] = node.inputs[idx]
# check that all inputs have valid dimensions
emitted_inames = {}
for idx, iname in enumerate(inputs):
code += f"rargs[{idx}] = &{iname}->ga;\n"
if iname in emitted_inames:
continue
code += (
"""
if (%(nd)s != PyGpuArray_NDIM(%(iname)s))
{
PyErr_Format(PyExc_TypeError,
"need %(nd)s dims, not %%u",
PyGpuArray_NDIM(%(iname)s));
%(fail)s;
}
for (int i = 0; i< %(nd)s; ++i)
{
dims[i] = (dims[i] == 1) ? PyGpuArray_DIMS(%(iname)s)[i] : dims[i];
if ((!(broadcasts_%(iname)s[i] &&
PyGpuArray_DIMS(%(iname)s)[i] == 1)) &&
(dims[i] != PyGpuArray_DIMS(%(iname)s)[i]))
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" %(idx)d (indices start at 0) has shape[%%d] == %%llu"
", but the output's size on that axis is %%llu.",
i,
(unsigned long long)PyGpuArray_DIMS(%(iname)s)[i],
(unsigned long long)dims[i]
);
%(fail)s;
}
}
"""
% locals()
)
emitted_inames[iname] = True
# check that all outputs have valid dimensions
p = len(node.inputs)
for idx, oname in enumerate(outputs):
typecode = dtype_to_typecode(node.outputs[idx].dtype)
if idx not in self.inplace_pattern.keys():
code += (
"""
for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
{
Py_DECREF(%(oname)s);
%(oname)s = NULL;
}
}
if (%(oname)s && !GpuArray_CHKFLAGS(&(%(oname)s->ga), GA_C_CONTIGUOUS))
{
Py_XDECREF(%(oname)s);
%(oname)s = NULL;
}
if (NULL == %(oname)s)
{
%(oname)s = pygpu_empty(%(nd)d, dims,
%(typecode)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (!%(oname)s) {
%(fail)s
}
}
rargs[%(p)s] = &%(oname)s->ga;
"""
% locals()
)
p += 1
else:
input_idx = self.inplace_pattern[idx]
iname = inputs[input_idx]
code += (
"""
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_INCREF(%(oname)s);
for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" %(idx)d (indices start at 0), working inplace"
" on input %(input_idx)s, has shape[%%i] == %%llu"
", but the output's size on that axis is %%llu.",
i,
(unsigned long long)PyGpuArray_DIMS(%(oname)s)[i],
(unsigned long long)dims[i]
);
Py_DECREF(%(oname)s);
%(oname)s = NULL;
%(fail)s;
}
}
"""
% locals()
)
code += """
if (GpuElemwise_call(ge, rargs, GE_BROADCAST) != GA_NO_ERROR) {
PyErr_SetString(PyExc_RuntimeError, "Error in the elemwise call");
%(fail)s
}
""" % dict(
fail=sub["fail"]
)
return str(code)
# To disable the superclass perform.
perform = Op.perform
# Since we don't have a perform ...
def python_constant_folding(self, node):
return False
def c_code_cache_version(self):
ver = self.scalar_op.c_code_cache_version()
if ver:
return (10, ver)
else:
return ver
class SupportCodeError(Exception):
"""
We do not support certain things (such as the C++ complex struct).
"""
class GpuDimShuffle(DimShuffle):
"""
DimShuffle on the GPU.
"""
_f16_ok = True
c_func_name = "APPLY_SPECIFIC(gpu_dimshuffle)"
def make_node(self, input):
ctx_name = infer_context_name(input)
res = DimShuffle.make_node(self, input)
otype = GpuArrayType(
dtype=res.outputs[0].type.dtype,
broadcastable=res.outputs[0].type.broadcastable,
context_name=ctx_name,
)
input = as_gpuarray_variable(input, ctx_name)
return Apply(self, [input], [otype()])
def __str__(self):
if self.inplace:
s = "InplaceGpuDimShuffle{%s}"
else:
s = "GpuDimShuffle{%s}"
return s % (",".join(str(x) for x in self.new_order))
def perform(self, node, inp, out, params):
(input,) = inp
(storage,) = out
res = input
res = res.transpose(self.shuffle + self.drop)
shape = list(res.shape[: len(self.shuffle)])
for augm in self.augment:
shape.insert(augm, 1)
res = res.reshape(shape)
if not self.inplace:
res = res.copy()
storage[0] = res
class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype):
"""
GpuCAReduceCuda is a Reduction along some dimensions by a scalar op.
Parameters
----------
reduce_mask
The dimensions along which to reduce. The `reduce_mask` is a tuple of
booleans (actually integers 0 or 1) that specify for each input
dimension, whether to reduce it (1) or not (0).
pre_scalar_op
If present, must be a scalar op with only 1 input. We will execute it
on the input value before reduction.
Examples
--------
When scalar_op is a theano.scalar.basic.Add instance:
- reduce_mask == (1,) sums a vector to a scalar
- reduce_mask == (1,0) computes the sum of each column in a matrix
- reduce_mask == (0,1) computes the sum of each row in a matrix
- reduce_mask == (1,1,1) computes the sum of all elements in a 3-tensor.
Notes
-----
Any reduce_mask of all zeros is a sort of 'copy', and may be removed during
graph optimization.
This Op is a work in progress.
    This op was recently upgraded from just GpuSum to a general CAReduce. Not
many code cases are supported for scalar_op being anything other than
scalar.Add instances yet.
Important note: if you implement new cases for this op, be sure to
benchmark them and make sure that they actually result in a speedup.
GPUs are not especially well-suited to reduction operations so it is
quite possible that the GPU might be slower for some cases.
"""
__props__ = (
"axis",
"reduce_mask",
"dtype",
"acc_dtype",
"scalar_op",
"pre_scalar_op",
)
_f16_ok = True
verbose = 0
def __init__(
self,
scalar_op,
axis=None,
reduce_mask=None,
dtype=None,
acc_dtype=None,
pre_scalar_op=None,
):
if reduce_mask is not None:
reduce_mask = tuple(reduce_mask)
self.reduce_mask = reduce_mask
# used to make sure that calls to scalar op
# have unique name arguments
self._n_scalar_op_calls = 0
CAReduceDtype.__init__(
self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype
)
self.pre_scalar_op = pre_scalar_op
if pre_scalar_op:
assert pre_scalar_op.nin == 1
def __str__(self):
pre = ""
if self.pre_scalar_op:
pre = f"pre={self.pre_scalar_op},red="
ax = ""
if self.axis is not None:
ax = f"{{{', '.join(str(x) for x in self.axis)}}}"
return f"GpuCAReduceCuda{{{pre}{str(self.scalar_op)}}}{ax}"
def __setstate__(self, d):
self.__dict__.update(d)
# For unpickling of old ops.
if not hasattr(self, "pre_scalar_op"):
self.pre_scalar_op = None
def make_node(self, x):
x = as_gpuarray_variable(x, infer_context_name(x))
if x.type.context.kind != b"cuda":
raise TypeError("GpuCAReduceCuda doesn't work for non-cuda devices")
ret = super().make_node(x)
self = copy.copy(self)
self.axis = ret.op.axis
if self.pre_scalar_op:
# Currently we only tested pre_scalar_op that don't cause
# upcast.
assert Elemwise(self.pre_scalar_op)(x).dtype == x.dtype
if self.reduce_mask is None:
if self.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in self.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
self.reduce_mask = tuple(reduce_mask)
if x.type.ndim != len(self.reduce_mask):
raise TypeError(f"x must have rank {len(self.reduce_mask)}")
if (
"complex" in x.dtype
or "complex" in ret.outputs[0].dtype
or "complex" in self._acc_dtype(x.dtype)
):
raise NotImplementedError("We don't support complex in gpu reduction")
return Apply(
self,
[x],
[
GpuArrayType(
ret.outputs[0].dtype,
ret.outputs[0].type.broadcastable,
context_name=x.type.context_name,
)()
],
)
def perform(self, node, inp, out, ctx):
Op.perform(self, node, inp, out, ctx)
def supports_c_code(self, inputs):
"""
        Returns True if the current op and reduce pattern have functioning C code.
"""
# If we don't even have the right method, we certainly
# don't support the C code
# (This is the test that used to be implemented by
# local_gpu_sum)
pattern = "".join(str(i) for i in self.reduce_mask)
if not hasattr(self, f"c_code_reduce_{pattern}"):
return False
# Now that this is a general reduction op, we might
# have a method for a pattern, but that pattern
# might not be implemented for the current scalar op.
# To detect this more complicated situation, we
# make fake arguments to c_code, try to run them,
# and see if NotImplementedError gets raised.
node = self.make_node(*inputs)
name = "fake_name"
inp = [f"fake_input_name_{i}" for i in range(len(inputs))]
out = [f"fake_output_name_{i}" for i in range(len(node.outputs))]
sub = {"fail": "fake failure code", "params": "fake context"}
try:
self.c_code(node, name, inp, out, sub)
if not self.gpu_kernels(node, name):
return False
except NotImplementedError:
return False
return True
def c_headers(self):
return ["<numpy_compat.h>", "<gpuarray/types.h>"]
def c_support_code(self):
return """
template <typename T>
static T ceil_intdiv(T a, T b)
{
return (a/b) + ((a % b) ? 1: 0);
}
"""
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(z,) = out
nd_in = node.inputs[0].type.ndim
nd_out = node.outputs[0].type.ndim
# For complex, we need to use theano_complex* in the c code to
# have it run. But libgpuarray don't understand it.
in_dtype = node.inputs[0].type.dtype_specs()[1]
out_dtype = node.outputs[0].type.dtype_specs()[1]
gin_dtype = "npy_" + node.inputs[0].dtype
gout_dtype = "npy_" + node.outputs[0].dtype
assert nd_in - nd_out == sum(self.reduce_mask)
sio = StringIO()
fail = sub["fail"]
ctx = sub["params"]
# check input
print(
"""
if (PyGpuArray_NDIM(%(x)s) != %(nd_in)s)
{
PyErr_Format(PyExc_TypeError,
"required nd=%(nd_in)s, got nd=%%u", PyGpuArray_NDIM(%(x)s));
%(fail)s;
}
"""
% locals(),
file=sio,
)
# It might be nice to use a property of the op class to do this,
# but tensor.elemwise.CAReduce has this exact same check so I guess
# this is OK to do
if self.scalar_op in [scalar.scalar_minimum, scalar.scalar_maximum]:
conds = [
f"(PyGpuArray_DIMS({x})[{i}] == 0)"
for i in range(nd_in)
if self.reduce_mask[i]
]
assert len(conds) > 0
cond = "(" + " || ".join(conds) + ")"
print(
"""
if %(cond)s
{
PyErr_Format(PyExc_ValueError," tried to reduce a 0-length axis.");
%(fail)s;
}
"""
% locals(),
file=sio,
)
#
# alloc an output if we need one
#
        # check the basics of our output
print(
f"""
if ( !{z}
|| (PyGpuArray_NDIM({z}) != {nd_out})
""",
file=sio,
)
# ensure that the output has the right non-reduced dimensions
j = 0
for i in range(nd_in):
if not self.reduce_mask[i]:
print(
" || (PyGpuArray_DIMS(%(z)s)[%(j)s] != PyGpuArray_DIMS(%(x)s)[%(i)d]) "
% locals(),
file=sio,
)
j += 1
print(
"""
)
{
"""
% locals(),
file=sio,
)
if nd_out > 0:
print(f"size_t new_dims[{nd_out}]; ", file=sio)
else:
print("size_t *new_dims=NULL; ", file=sio)
j = 0
for i in range(nd_in):
if not self.reduce_mask[i]:
print(
f"new_dims[{j}] = PyGpuArray_DIMS({x})[{i}];",
file=sio,
)
j += 1
out_typecode = dtype_to_typecode(gout_dtype[4:])
print(
"""
Py_XDECREF(%(z)s);
%(z)s = pygpu_empty(%(nd_out)s, new_dims,
%(out_typecode)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (NULL == %(z)s)
{
PyErr_Format(PyExc_RuntimeError, "Failed to allocate output");
%(fail)s;
}
}
"""
% locals(),
file=sio,
)
# \begin bracket the reduction in a check that there is
# actually work to do
if getattr(self.scalar_op, "identity", None) == 0:
zero_shp = f"GpuArray_memset(&{z}->ga, 0)"
# TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
else:
scalar_op = self.scalar_op
zero_shp = (
"""
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduceCuda not implemented when input shape is 0"
" for this scalar_op: %(scalar_op)s");
%(fail)s;
"""
% locals()
)
print(
"""
if (PyGpuArray_SIZE(%(z)s) && ! PyGpuArray_SIZE(%(x)s)){
%(zero_shp)s;
}
else if (PyGpuArray_SIZE(%(z)s))
{
"""
% locals(),
file=sio,
)
#
# Now perform the reduction
#
if all(i == 1 for i in self.reduce_mask):
# check if the tensor is ccontiguous, if true, use the c_code_reduce_ccontig code.
# TODO: check if we are ccontiguous when we un-dimshuffle
# TODO: if only some dims are ccontiguous, call version with less dims.
print("if(%(x)s->ga.flags & GA_C_CONTIGUOUS){" % locals(), file=sio)
self.c_code_reduce_ccontig(sio, node, name, x, z, fail)
print("}else{", file=sio)
getattr(self, f"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}")(
sio, node, name, x, z, fail
)
print("}", file=sio)
else:
getattr(self, f"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}")(
sio, node, name, x, z, fail
)
# \end bracket the reduction ...
print(
"""
}
"""
% locals(),
file=sio,
)
return sio.getvalue()
def _makecall(
self, node, name, x, z, fail, pattern=None, extra_dims=(), extra_strides=()
):
"""
Return a string for making a kernel call.
The return value looks something like:
.. code-block:: c
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
if (verbose)
printf("running kernel_reduce_10_%(name)s\\n");
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
void *kernel_params[] = {
(void *)&PyGpuArray_DIMS(%(x)s)[0],
(void *)&PyGpuArray_DIMS(%(x)s)[1],
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0,
(void *)&stride_A1,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0};
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
%(err_check)s
"""
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
sio = StringIO()
if pattern is None:
pattern = "".join(str(c) for c in self.reduce_mask)
ndim = len(self.reduce_mask)
nd_out = ndim - sum(self.reduce_mask)
shapes_format = f"shape=({','.join(['%llu'] * node.inputs[0].ndim)})"
shapes_data = ",".join(
[f"(size_t) PyGpuArray_DIMS({x})[{i}]" for i in range(node.inputs[0].ndim)]
)
k_var = f"kernel_reduce_{pattern}_{name}"
params = []
for i in range(ndim):
params.append(f"(void *)&PyGpuArray_DIMS({x})[{i}]")
for declaration, value in extra_dims:
print(declaration % locals(), file=sio)
params.append(value)
params.append(f"(void *){x}->ga.data")
params.append(f"(void *)&{x}->ga.offset")
for i in range(ndim):
print(
"""
ssize_t stride_A%(i)d = PyGpuArray_STRIDES(%(x)s)[%(i)s]/sizeof(%(in_dtype)s);
"""
% locals(),
file=sio,
)
params.append("(void *)&stride_A%(i)d" % locals())
for declaration, value in extra_strides:
print(declaration % locals(), file=sio)
params.append(value)
params.append(f"(void *){z}->ga.data")
params.append(f"(void *)&{z}->ga.offset")
for i in range(nd_out):
print(
"""
ssize_t stride_Z%(i)d = PyGpuArray_STRIDES(%(z)s)[%(i)s]/sizeof(%(out_dtype)s);
"""
% locals(),
file=sio,
)
params.append("(void *)&stride_Z%(i)d" % locals())
kernel_params = ", ".join(params)
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
if (verbose)
printf("running kernel_reduce_%(pattern)s_%(name)s\\n");
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
void *kernel_params[] = { %(kernel_params)s };
if (verbose>1)
printf("n_threads[0]=%%lu, n_threads[1]=%%lu, "
"n_threads[2]=%%lu, n_threads=%%lu, "
"n_blocks[0]=%%lu, n_blocks[1]=%%lu, n_blocks[2]=%%lu, "
"n_blocks=%%lu, n_shared=%%d, %(shapes_format)s\\n",
n_threads[0],n_threads[1],
n_threads[2],
n_threads[0]*n_threads[1]*
n_threads[2],
n_blocks[0],n_blocks[1],n_blocks[2],
n_blocks[0]*n_blocks[1]*n_blocks[2],
n_shared, %(shapes_data)s);
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
%(err_check)s
"""
% locals(),
file=sio,
)
return sio.getvalue()
def _k_decl(self, node, nodename, pattern=None, ndim=None, reduce_mask=None):
"""
Return a string to declare a kernel function.
The result will look something like this:
.. code-block:: c
KERNEL void kernel_reduce_110_%(nodename)s(
const ga_size d0,
const ga_size d1,
const ga_size d2,
const %(in_type)s *A,
const ga_size offset_A,
const ga_ssize sA0,
const ga_ssize sA1,
const ga_ssize sA2,
%(out_type)s * Z,
const ga_size offset_Z,
const ga_ssize sZ0)
Since the nodename is unique, we don't need to put the name
of the scalar_op in here.
"""
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
if reduce_mask is None:
reduce_mask = self.reduce_mask
if ndim is None:
ndim = len(reduce_mask)
if pattern is None:
pattern = "".join(str(i) for i in reduce_mask)
kname = f"kernel_reduce_{pattern}"
k_var = f"kernel_reduce_{pattern}_{nodename}"
params = []
sio = StringIO()
print(
f"""
KERNEL void {kname}(
""",
file=sio,
)
for i in range(ndim):
params.append("uintp")
print(
f"""
const ga_size d{i},
""",
file=sio,
)
params.append(gpuarray.GpuArray)
params.append("uintp")
print(
f"""
const {in_type} *A, const ga_size offset_A,
""",
file=sio,
)
for i in range(ndim):
params.append("intp")
print(
f"""
const ga_ssize sA{i},
""",
file=sio,
)
params.append(gpuarray.GpuArray)
params.append("uintp")
print(
f"""
{out_type} * Z, const ga_size offset_Z
""",
file=sio,
)
for i in range(ndim - sum(reduce_mask)):
params.append("intp")
print(
f"""
, const ga_ssize sZ{i}
""",
file=sio,
)
print(")", file=sio)
return sio.getvalue(), kname, params, k_var
def _k_init(self, node, nodename):
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
acc_dtype = self._acc_dtype(node.inputs[0].dtype)
# We need to use theano_complex* and not npy_complex*
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
acc_type = gpuarray.dtype_to_ctype(acc_dtype)
return (
"""
const int threadCount = blockDim.x * blockDim.y * blockDim.z;
const int threadNum = threadIdx.z * blockDim.x * blockDim.y
+ threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = 0;
"""
% locals()
)
def _assign_init(self, first_item, dtype):
"""
        This returns the initial value for myresult.
        If the scalar op has an identity value, return it.
        Otherwise, check that the scalar op is maximum or minimum
        and return first_item, the first element of the reduction.
        Since the maximum or minimum of a value with itself is unchanged,
        this works.
"""
if hasattr(self.scalar_op, "identity"):
return str(self.scalar_op.identity)
else:
assert isinstance(
self.scalar_op, (scalar.ScalarMaximum, scalar.ScalarMinimum)
)
if self.pre_scalar_op: # TODO: multiple dtypes
# dtype = node.inputs[0].dtype
dummy_var = scalar.Scalar(dtype=dtype)()
dummy_node = self.pre_scalar_op.make_node(dummy_var)
dummy_name = "assign_init_pre_scalar_op" + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
t = self.pre_scalar_op.c_code(
dummy_node, dummy_name, (first_item,), ("",), {}
)
assert t.startswith(" = ")
first_item = t[3:]
if first_item[-1] == ";":
first_item = first_item[:-1]
return first_item
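    # A rough, plain-Python illustration of the initialization rule implemented
    # above (not used by this Op; `op` and `first` are hypothetical stand-ins):
    #
    #     def init_value(op, first):
    #         if hasattr(op, "identity"):   # e.g. 0 for add, 1 for mul
    #             return op.identity
    #         # max/min: start from the first reduced element, since
    #         # max(x, x) == x and min(x, x) == x.
    #         return first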
def _assign_reduce(self, node, name, left, right, sub, pre):
"""
Parameters
----------
node
The node argument to this op's c_code.
name
The name argument to this op's c_code.
left
A C code string identifying an lvalue.
right
A C code string identifying an expression.
sub
The sub argument to this op's c_code.
pre
If True, we will add the pre_scalar_op.c_code.
Returns
-------
str
C code to reduce left and right, assigning the result to left.
"""
(x,) = node.inputs
in_dtype = x.dtype
out_dtype = node.outputs[0].dtype
dummy_left = Scalar(dtype=out_dtype)()
dummy_right = Scalar(dtype=in_dtype)()
dummy_node = self.scalar_op.make_node(dummy_left, dummy_right)
dummy_name = name + "_scalar_op" + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
if pre and self.pre_scalar_op:
assert left == "myresult"
dummy_node = self.pre_scalar_op.make_node(dummy_left)
dummy_name = name + "_scalar_op" + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
t = self.pre_scalar_op.c_code(dummy_node, dummy_name, (right,), ("",), sub)
assert t.startswith(" = ")
right = t[3:]
if right[-1] == ";":
right = right[:-1]
return self.scalar_op.c_code(
dummy_node, dummy_name, (left, right), (left,), sub
)
def _k_reduce_buf(self, z_pos, node, name, sub):
"""
        Return C code that folds the per-thread partial results stored in the
        shared buffer ``buf`` into a single value and writes it to ``z_pos``.
Parameters
----------
node, name, sub
These should be passed through from the original call to c_code.
"""
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
write_out = write_w(node.outputs[0].dtype)
current_version = """
__syncthreads(); // some kernel do multiple reduction.
buf[threadNum] = myresult;
__syncthreads();
// rest of function is handled by one warp
if (threadNum < warpSize) {
//round up all the partial sums into the first `warpSize` elements
for (int i = threadNum + warpSize; i < threadCount; i += warpSize)
{
"""
current_version += (
self._assign_reduce(node, name, "myresult", "buf[i]", sub, False)
+ """
}
buf[threadNum] = myresult;
}
__syncthreads();
for (unsigned int _n = warpSize / 2; _n > 0; _n /= 2) {
if (threadNum < _n && threadNum + _n < threadCount)
"""
)
current_version += self._assign_reduce(
node, name, "buf[threadNum]", "buf[threadNum+_n]", sub, False
)
current_version += """
__syncthreads();
}
if (threadNum == 0) {
%(z_pos)s = %(write_out)s(buf[0]);
}
"""
current_version = current_version % locals()
return current_version
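    # The CUDA emitted above is a two-stage shared-memory reduction: every
    # thread stores its partial result in `buf`, one warp folds all partials
    # into the first `warpSize` slots, and a halving tree then collapses those
    # into buf[0].  A rough NumPy sketch of the same dataflow (illustrative
    # only; assumes `import numpy as np` and an associative `combine`):
    #
    #     def tree_reduce(buf, warp_size=32, combine=np.add):
    #         buf = buf.copy()
    #         n = len(buf)
    #         for i in range(warp_size, n):        # stage 1: strided fold
    #             buf[i % warp_size] = combine(buf[i % warp_size], buf[i])
    #         step = warp_size // 2
    #         while step > 0:                      # stage 2: halving tree
    #             for t in range(step):
    #                 if t + step < n:
    #                     buf[t] = combine(buf[t], buf[t + step])
    #             step //= 2
    #         return buf[0]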
    # Threads must be organized so that all threads with the same
    # threadNum % nb_reduce contribute to the same sum; nb_reduce <= warpSize.
def _k_reduce_buf_multiple(self, z_pos, node, name, nb_reduce):
reduce_fct = self._assign_reduce(node, name, "myresult", "buf[i]", {}, False)
write_out = write_w(node.outputs[0].dtype)
return (
"""
__syncthreads(); // some kernel do multiple reduction.
buf[threadNum] = myresult;
__syncthreads();
// rest of function is handled by one warp
if (threadNum < %(nb_reduce)s)
{
//round up all the partial sums into the first `nb_reduce` elements
for (int i = threadNum + %(nb_reduce)s; i < threadCount; i += %(nb_reduce)s)
{
%(reduce_fct)s;
}
%(z_pos)s = %(write_out)s(myresult);
}
"""
% locals()
)
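    # Unlike _k_reduce_buf above, this variant keeps `nb_reduce` independent
    # results: all partials whose thread index is congruent modulo nb_reduce
    # feed the same output.  Rough NumPy sketch of that grouping (illustrative
    # only; assumes `import numpy as np`):
    #
    #     buf = np.arange(12.0)               # partial results, threadCount == 12
    #     nb_reduce = 4
    #     out = np.array([buf[t::nb_reduce].sum() for t in range(nb_reduce)])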
def c_code_reduce_ccontig(self, sio, node, name, x, z, fail):
verbose = self.verbose
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
if getattr(self.scalar_op, "identity", None) == 0:
zero_shp = f"GpuArray_memset(&{z}->ga, 0)"
# TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
else:
zero_shp = (
"""
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op");
%(fail)s;
"""
% locals()
)
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = f"kernel_reduce_ccontig_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
{
if(PyGpuArray_SIZE(%(x)s)==0){
%(zero_shp)s;
}else{
int verbose = %(verbose)s;
size_t numEls = PyGpuArray_SIZE(%(x)s);
size_t n_threads = std::min(numEls, (size_t) 256);
size_t n_blocks = 1;
void *kernel_params[] = {(void *)&numEls,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset};
if (verbose) printf("running kernel_reduce_ccontig_%(name)s"
" n_threads=%%llu, size=%%llu, ndim=%%u\\n",
n_threads, numEls,
PyGpuArray_NDIM(%(x)s));
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads;
int err = GpuKernel_call(&%(k_var)s, 1, &n_blocks, &n_threads, n_shared, kernel_params);
%(err_check)s
}
}
"""
% locals(),
file=sio,
)
def c_code_reduce_1(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_11(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
while (n_threads[1] * n_threads[0] <= 256) ++n_threads[1];
n_threads[1] -= 1;
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_01X(self, sio, node, name, x, z, fail, N):
"""
Parameters
----------
N
            The number of 1s in the pattern: N=1 -> 01, N=2 -> 011, N=3 -> 0111.
            Works for N=1, 2, 3.
"""
assert N in [1, 2, 3]
verbose = self.verbose
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
makecall = self._makecall(node, name, x, z, fail)
N_pattern = "".join(["1"] * N)
param_dim = ",".join([f"PyGpuArray_DIMS({x})[{i}]" for i in range(N + 1)])
strides_dim = ",".join(
[f"PyGpuArray_STRIDES({x})[{i}]/sizeof({in_dtype})" for i in range(N + 1)]
)
threads_y = (
"""
//get as many y threads as we can fit
while (n_threads[0] * (n_threads[1]+1) <= 256)
{
if (n_threads[1] < PyGpuArray_DIMS(%(x)s)[%(N)s-1])
n_threads[1] += 1;
else
break;
}"""
% locals()
)
threads_z = (
"""
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
{
if (n_threads[2] < PyGpuArray_DIMS(%(x)s)[%(N)s-2])
n_threads[2] += 1;
else
break;
}
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
"""
% locals()
)
if len(self.reduce_mask) == 2:
threads_y = ""
threads_z = ""
if len(self.reduce_mask) == 3:
threads_z = ""
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[%(N)s], (size_t) 256), 1, 1};
%(threads_y)s
%(threads_z)s
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_01(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 1)
def c_code_reduce_011(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 2)
def c_code_reduce_0111(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 3)
def c_code_reduce_10(self, sio, node, name, x, z, fail):
verbose = self.verbose
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = f"kernel_reduce_10_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
{
int verbose = %(verbose)s;
if(PyGpuArray_STRIDES(%(x)s)[0]>
PyGpuArray_STRIDES(%(x)s)[1]){
// If there are a lot of summations to do, then we can use simple parallelization -
// use each thread to do one sum.
// we might as well launch blocks of 32 threads because that's the warp size.
// we could schedule more threads if we were maxing out the gridsize below, but
// the gridsize is way more than the physical hardware and I think 32 threads
// on a huge grid is enough to fully use the hardware.
size_t n_threads[3] = {32, 1, 1};
// We kindof reshape the input implicitly to something 4D:
// the shape A,B,C -> A, B, D, E
// where C <= D*E < C+32
// where E==32
GpuKernel *%(k_var)s = &kernel_reduce_010_AD_%(name)s;
size_t A = 1;
size_t B = PyGpuArray_DIMS(%(x)s)[0];
size_t C = PyGpuArray_DIMS(%(x)s)[1];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
ssize_t stride_A0 = 1;
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = 1;
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
%(err_check)s
}else{
GpuKernel *%(k_var)s = &kernel_reduce_010_%(name)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {1, std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 4096), 1};
if (verbose) {
fprintf(stderr,
"running kernel_reduce_10_%(name)s n_blocks=(%%llu,%%llu)\\n",
(unsigned long long)n_blocks[0],
(unsigned long long)n_blocks[1]);
}
assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]);
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0];
size_t dim_0 = 1;
ssize_t stride_A0 = 1;
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = 1;
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&dim_0,
(void *)&PyGpuArray_DIMS(%(x)s)[0],
(void *)&PyGpuArray_DIMS(%(x)s)[1],
(void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
%(err_check)s
}
}
"""
% locals(),
file=sio,
)
def c_code_reduce_010(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
makecall_inner = self._makecall(node, name, x, z, fail, pattern="010_inner")
pattern = "".join(str(i) for i in self.reduce_mask)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
k_var = f"kernel_reduce_010_AD_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
{
//int n_summations = PyGpuArray_DIMS(%(x)s)[0] * PyGpuArray_DIMS(%(x)s)[2];
//if ((n_summations >= 15 * 32) && (PyGpuArray_DIMS(%(x)s)[2]>=16))
if (1) // if the alternative is less buggy, consider not using this branch
{
// If there are a lot of summations to do, then we can use simple parallelization -
// use each thread to do one sum.
// we might as well launch blocks of 32 threads because that's the warp size.
// we could schedule more threads if we were maxing out the gridsize below, but
// the gridsize is way more than the physical hardware and I think 32 threads
// on a huge grid is enough to fully use the hardware.
size_t n_threads[3] = {32, 1, 1};
// We kindof reshape the input implicitly to something 4D:
// the shape A,B,C -> A, B, D, E
// where C <= D*E < C+32
// where E==32
size_t A = PyGpuArray_DIMS(%(x)s)[0];
size_t B = PyGpuArray_DIMS(%(x)s)[1];
size_t C = PyGpuArray_DIMS(%(x)s)[2];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
%(err_check)s
}
else
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min((size_t) 32, PyGpuArray_DIMS(%(x)s)[2]), 1, 1};
while( (n_threads[0]*(n_threads[1]+1)<=256)
&& (n_threads[1]<PyGpuArray_DIMS(%(x)s)[1])){
n_threads[1]++;
}
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096), 1, 1};
n_blocks[1] = std::min(
ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
(size_t)n_threads[0]),
(size_t)(4096 / n_blocks[0])
);
if(std::min(std::min(PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s),
PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s)),
PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s))
==PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s)
&& n_blocks[1]==ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
(size_t)n_threads[0])){
if(verbose>1)
printf("n_block.x.1=%%d, n_block.x.2=%%d, n_block.y.1=%%d, n_block.y.2=%%d,\\n",
PyGpuArray_DIMS(%(x)s)[0],4096,
ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],(size_t)n_threads[0]),
(size_t)(4096 / n_blocks[0]));
assert(n_threads[0]<=32);
%(makecall_inner)s
}else{
n_threads[0] = std::min(PyGpuArray_DIMS(%(x)s)[1],
(size_t) 256);
n_blocks[0] = std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096);
n_blocks[1] = std::min(
PyGpuArray_DIMS(%(x)s)[2],
(size_t)(4096 / n_blocks[0])
);
%(makecall)s
}
}
}
"""
% locals(),
file=sio,
)
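    # The "010_AD" branch above treats an (A, B, C) input as if it were
    # reshaped to (A, B, D, 32) with D = ceil_intdiv(C, 32), so one 32-thread
    # block reduces over B for 32 adjacent output columns at a time and gets
    # coalesced reads.  A hedged NumPy sketch of the index bookkeeping only
    # (illustrative; assumes `import numpy as np`):
    #
    #     A, B, C = 2, 5, 70
    #     D = (C + 31) // 32                  # C <= 32*D < C + 32, as asserted in the C code
    #     x = np.random.rand(A, B, C)
    #     out = np.empty((A, C))
    #     for a in range(A):                  # blockIdx.x
    #         for d in range(D):              # blockIdx.y
    #             for lane in range(32):      # threadIdx.x
    #                 c = d * 32 + lane
    #                 if c < C:
    #                     out[a, c] = x[a, :, c].sum()   # reduce the middle axis
    #     assert np.allclose(out, x.sum(axis=1))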
def c_code_reduce_0101(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[0], PyGpuArray_DIMS(%(x)s)[2], 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_100(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = f"kernel_reduce_010_AD_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
# use threadIdx.x for i0
# use blockIdx.x for i1
# use blockIdx.y for i2
print(
"""
{
int verbose = %(verbose)s;
if (PyGpuArray_STRIDES(%(x)s)[2] != sizeof(%(in_dtype)s)){
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t)4096), 1, 1};
while (n_blocks[0] * (n_blocks[1]+1) <= 4096 &&
n_blocks[1] <= PyGpuArray_DIMS(%(x)s)[2])
{
n_blocks[1] += 1;
}
%(makecall)s
}
else
{ // reuse 010_AD kernel, we transpose the 2 first dim
// See the reduction for the real 010_AD kernel for
// explanation. We do this to get coalesced read.
size_t n_threads[3] = {32, 1, 1};
size_t A = PyGpuArray_DIMS(%(x)s)[1];
size_t B = PyGpuArray_DIMS(%(x)s)[0];
size_t C = PyGpuArray_DIMS(%(x)s)[2];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
size_t n_shared = 0;
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
%(err_check)s
}
}
"""
% locals(),
file=sio,
)
def c_code_reduce_110(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
while (n_threads[0]*n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[2], 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_001(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
while (n_blocks[0] * n_blocks[1] <= 4096)
{
if (n_blocks[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_blocks[1] += 1;
}
n_blocks[1] -= 1;
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_101(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(
node,
name,
x,
z,
fail,
extra_dims=[("size_t one = 1;", "(void *) &one")],
extra_strides=[("ssize_t sone = 1;", "(void *) &sone")],
pattern="1011",
)
print(
"""
{
int verbose = %(verbose)s;
// size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3],
// (size_t) 256), 1, 1};
size_t n_threads[3] = {1, 1, 1};
while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
++n_threads[2];
if (n_threads[2] > 64)
n_threads[2] = 64;
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_111(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
//get as many y threads as we can fit
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
{
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[2] += 1;
}
n_threads[2] -= 1;
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_0011(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
print(
"""
{
int verbose = %(verbose)s;
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
while (n_blocks[0] * n_blocks[1] <= 4096 &&
n_blocks[1] < PyGpuArray_DIMS(%(x)s)[1])
{
n_blocks[1] += 1;
}
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * n_threads[1] <= 256
&& n_threads[1] < PyGpuArray_DIMS(%(x)s)[2]
&& n_threads[0] * n_threads[1] * sizeof(%(acc_dtype)s) <=(15*1024-200))
{
n_threads[1] += 1;
}
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_1111(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
//get as many y threads as we can fit
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
{
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[2] += 1;
}
n_threads[2] -= 1;
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_1011(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) ++n_threads[2];
if (n_threads[2] > 64)
n_threads[2] = 64;
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_cache_version_apply(self, node):
version = [
24,
self.verbose,
] # the version corresponding to the c code in this Op
# now we insert versions for the ops on which we depend...
scalar_node = Apply(
self.scalar_op,
[Scalar(dtype=input.type.dtype)() for input in node.inputs],
[Scalar(dtype=output.type.dtype)() for output in node.outputs],
)
version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node))
for i in node.inputs + node.outputs:
version.extend(Scalar(dtype=i.type.dtype).c_code_cache_version())
version.extend(self.kernel_version(node))
if all(version):
return tuple(version)
else:
return ()
def gpu_kernels(self, node, nodename):
nd_in = len(self.reduce_mask)
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
acc_dtype = self._acc_dtype(node.inputs[0].dtype)
assign_dtype = in_dtype
flags = Kernel.get_flags(in_dtype, acc_dtype, out_dtype)
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
acc_type = gpuarray.dtype_to_ctype(acc_dtype)
load_in = load_w(in_dtype)
write_out = write_w(out_dtype)
kernels = []
if all(i == 1 for i in self.reduce_mask):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node, nodename, "myresult", load_in + "(A[i0])", {}, True
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
kname = "kernel_reduce_ccontig"
k_var = "kernel_reduce_ccontig_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
%(out_type)s *Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = ["uintp", gpuarray.GpuArray, "uintp", gpuarray.GpuArray, "uintp"]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1,):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node, nodename, "myresult", load_in + "(A[i0 * sA0])", {}, True
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
kname = "kernel_reduce_1"
k_var = "kernel_reduce_1_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
gpuarray.GpuArray,
"uintp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1])",
{},
True,
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
kname = "kernel_reduce_11"
k_var = "kernel_reduce_11_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y*blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
# 01, 011, 0111
if (
0 == self.reduce_mask[0]
and all(self.reduce_mask[1:])
and nd_in in [2, 3, 4]
):
# this kernel uses one block for each row.
# threads per block for each element per row.
N_pattern = "".join(["1"] * (nd_in - 1))
# TODO: is it faster to hardcode sA3, etc. in the later
# code, rather than have the for_* variables declare them
# and the later code use their names?
if nd_in == 2:
for_i1 = "for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)"
first_i1 = "threadIdx.x"
sA1 = "sA1"
for_i2 = "int i2=0, sA2=0;"
sA2 = "0"
first_i2 = "0"
for_i3 = "int i3=0, sA3=0;"
sA3 = "0"
first_i3 = "0"
if nd_in == 3:
for_i1 = "for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)"
first_i1 = "threadIdx.y"
sA1 = "sA1"
for_i2 = "for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)"
first_i2 = "threadIdx.x"
sA2 = "sA2"
for_i3 = "int i3=0, sA3=0;"
first_i3 = 0
sA3 = "0"
if nd_in == 4:
for_i1 = "for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)"
first_i1 = "threadIdx.z"
sA1 = "sA1"
for_i2 = "for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)"
first_i2 = "threadIdx.y"
sA2 = "sA2"
for_i3 = "for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)"
first_i3 = "threadIdx.x"
sA3 = "sA3"
reducebuf = self._k_reduce_buf("Z[i0 * sZ0]", node, nodename, sub={})
param_dim = ",".join([f"const ga_size d{i}" for i in range(nd_in)])
param_strides = ",".join([f"const ga_ssize sA{i}" for i in range(nd_in)])
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_init = self._assign_init(
load_in
+ "(A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0])"
% locals(),
assign_dtype,
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i3 * sA3 + i2 * sA2 + i1 * sA1 + i0 * sA0])",
{},
True,
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){
myresult = %(reduce_init)s;
%(for_i1)s{
%(for_i2)s{
%(for_i3)s{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
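        # For the 01 / 011 / 0111 masks handled above, each block owns one
        # output row and its threads sweep every reduced trailing axis.
        # Numerically this is just a reduction over all axes but the first,
        # e.g. (NumPy sketch, illustrative only):
        #
        #     x.reshape(x.shape[0], -1).sum(axis=1)   # == x.sum(axis=(1, 2, ...))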
if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
            # c_contiguous (typical case) then each warp is accessing non-contiguous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i2*sZ1]", node, nodename, sub={}
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2])", assign_dtype
)
kname = "kernel_reduce_010"
k_var = "kernel_reduce_010_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask in [(0, 1, 0), (1, 0), (1, 0, 0)]:
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(X[a * sX0 + b * sX1 + c * sX2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(X[a * sX0 + 0 * sX1 + c * sX2])", assign_dtype
)
kname = "kernel_reduce_010_AD"
k_var = "kernel_reduce_010_AD_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size A, const ga_size B, const ga_size C, const ga_size D,
const %(in_type)s *X, const ga_size offset_X,
const ga_ssize sX0, const ga_ssize sX1, const ga_ssize sX2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
X = (const %(in_type)s *)(((char *)X)+offset_X);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = 0;
for (int a = blockIdx.x; a < A; a += gridDim.x)
{
for (int i2_D = blockIdx.y; i2_D < D; i2_D += gridDim.y)
{
int c = i2_D * 32 + threadIdx.x;
if (c < C)
{
myresult = %(reduce_init)s;
for (int b = 0; b < B; ++b)
{
%(reduce_fct)s;
}
Z[a * sZ0 + c * sZ1] = %(write_out)s(myresult);
}
}
}
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 1, 0):
#
            # This kernel is optimized for the case where the innermost
            # dimension has the smallest stride.
            # It uses one block for multiple columns (up to 32, TODO),
            # and threads per block for each element per column.
# thread.x = dim 2 contiguous
# thread.y = dim 1
# block.x = dim 0
# block.y = dim 1 rest
init = self._k_init(node, nodename)
decl, kname, params, k_var = self._k_decl(
node, nodename, pattern="010_inner"
)
reducebuf = self._k_reduce_buf_multiple(
"Z[i0 * sZ0 + i2*sZ1]", node, nodename, "blockDim.x"
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + 0 * sA1 + i2 * sA2])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y*blockDim.x+threadIdx.x; i2 < d2; i2 += gridDim.y*blockDim.x)
{
myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
            # c_contiguous (typical case) then each warp is accessing non-contiguous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf(
"Z[blockIdx.x * sZ0]", node, nodename, sub={}
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[blockIdx.x * sA2])", assign_dtype
)
kname = "kernel_reduce_110"
k_var = "kernel_reduce_110_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 0, 0):
reducebuf = self._k_reduce_buf(
"Z[i1 * sZ0 + i2 * sZ1]", node, nodename, sub={}
)
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i1 * sA1 + i2 * sA2])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
for (int i1 = blockIdx.x; i1 < d1; i1 += gridDim.x)
{
myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1, 1):
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i1 * sZ1]", node, nodename, sub={}
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + i1 * sA1])", assign_dtype
)
kname = "kernel_reduce_001"
k_var = "kernel_reduce_001_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 0, 1, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i1 * sZ1]", node, nodename, sub={}
)
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + i1 * sA1])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 1, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i2 * sZ1]", node, nodename, sub={}
)
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + i2 * sA2])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1, 1, 1):
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = 0; i0 < d0; i0++)
for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 0, 1, 1) or self.reduce_mask == (1, 0, 1):
reducebuf = self._k_reduce_buf("Z[blockIdx.x*sZ0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[blockIdx.x * sA1])", assign_dtype
)
kname = "kernel_reduce_1011"
k_var = "kernel_reduce_1011_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2, const ga_size d3,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, const ga_ssize sA3,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y * blockDim.z;
const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
return kernels
class GpuErfinv(Erfinv):
"""
Inverse error function for GPU.
"""
def c_headers(self):
return ["math_functions.h", "cublas_v2.h"]
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(z,) = out
if node.inputs[0].type in complex_types:
raise NotImplementedError("type not supported", type)
# NB: CUDA erfinv function (GPU op) returns NaN if x not in [-1;1],
        # while `scipy.special.erfinv` (CPU op) returns an infinity (-inf if x < -1, +inf if x > 1).
# For consistency of CPU and GPU ops, we wrap the CUDA erfinv in the following conditions
# to ensure that GPU op returns the same values as CPU op.
return (
"%(z)s = (%(x)s <= -1) ? erfinv(-1.0): ((%(x)s >= 1) ? erfinv(1.0): erfinv(%(x)s));"
% locals()
)
gpu_erfinv = GpuErfinv(upgrade_to_float_no_complex, name="gpu_erfinv")
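# The clamping above exists because CUDA's erfinv returns NaN outside [-1, 1],
# while the CPU op returns -inf / +inf there.  A rough Python sketch of the
# value mapping the generated C aims for (illustrative only; assumes
# `from scipy.special import erfinv`):
#
#     def gpu_like_erfinv(x):
#         if x <= -1.0:
#             return erfinv(-1.0)   # -inf, matching the CPU op for x <= -1
#         if x >= 1.0:
#             return erfinv(1.0)    # +inf, matching the CPU op for x >= 1
#         return erfinv(x)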
class GpuErfcinv(Erfcinv):
"""
Inverse complementary error function for GPU.
"""
def c_headers(self):
return ["math_functions.h", "cublas_v2.h"]
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(z,) = out
if node.inputs[0].type in complex_types:
raise NotImplementedError("type not supported", type)
# NB: CUDA erfcinv function (GPU op) returns NaN if x not in [0;2],
        # while `scipy.special.erfcinv` (CPU op) returns an infinity (+inf if x < 0, -inf if x > 2).
# For consistency of CPU and GPU ops, we wrap the CUDA erfcinv in the following conditions
# to ensure that GPU op returns the same values as CPU op.
return (
"%(z)s = (%(x)s <= 0) ? erfcinv(0.0): ((%(x)s >= 2) ? erfcinv(2.0): erfcinv(%(x)s));"
% locals()
)
gpu_erfcinv = GpuErfcinv(upgrade_to_float_no_complex, name="gpu_erfcinv")
# Caching GpuCAReduceCuda
def gpu_ca_reduce_cuda(
scalar_op,
axis=None,
reduce_mask=None,
dtype=None,
acc_dtype=None,
pre_scalar_op=None,
):
key = (scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op)
if key not in gpu_ca_reduce_cuda.cache:
gpu_ca_reduce_cuda.cache[key] = GpuCAReduceCuda(
scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op
)
return gpu_ca_reduce_cuda.cache[key]
gpu_ca_reduce_cuda.cache = {}
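# The factory above memoizes Op instances: calling it again with the same
# (hashable) arguments returns the identical object, which lets Theano merge
# equivalent nodes and reuse compiled kernels.  Hedged usage sketch (the scalar
# op and axis below are just examples):
#
#     op_a = gpu_ca_reduce_cuda(scalar.add, axis=(0,))
#     op_b = gpu_ca_reduce_cuda(scalar.add, axis=(0,))
#     assert op_a is op_b        # served from gpu_ca_reduce_cuda.cache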
class GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype):
"""
    CAReduce that reuses the Python code from gpuarray.
"""
def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None):
if not hasattr(scalar_op, "identity"):
raise ValueError("No identity on scalar op")
CAReduceDtype.__init__(
self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype
)
def __str__(self):
ax = ""
if self.axis is not None:
ax = f"{{{', '.join(str(x) for x in self.axis)}}}"
return f"GpuReduce{{{self.scalar_op}}}{ax}"
def make_node(self, input):
ctx_name = infer_context_name(input)
res = CAReduceDtype.make_node(self, input)
input = as_gpuarray_variable(input, ctx_name)
otype = GpuArrayType(
dtype=res.outputs[0].dtype,
broadcastable=res.outputs[0].broadcastable,
context_name=ctx_name,
)
if res.op.axis is not None:
redux = []
for i in range(len(input.type.broadcastable)):
redux.append(i in res.op.axis)
# since redux is just another way to describe what is in axis
# it doesn't need to be compared in __eq__ or __hash__
res.op.redux = redux
return Apply(res.op, [input], [otype()])
def get_params(self, node):
return node.outputs[0].type.context
def prepare_node(self, node, storage_map, compute_map, impl):
# cache the kernel object
self.get_kernel_cache(node)
def get_kernel_cache(self, node):
attr = "@cache_reduction_k"
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
if not hasattr(node, attr):
acc_dtype = getattr(self, "acc_dtype", None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
if any(redux):
setattr(node, attr, self.generate_kernel(node, acc_dtype, redux))
if any(redux):
return getattr(node, attr)
def gpu_kernels(self, node, name):
if not any(getattr(self, "redux", [node.inputs[0].ndim != 0])):
# Some OpenCL compilers do not accept no-arguments empty kernels
src = '#include "cluda.h"\nKERNEL void reduk(GLOBAL_MEM float *a) { a[0] = 0; }'
params = ["float32"]
else:
k = self.get_kernel_cache(node)
_, src, _, _ = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)
nd = node.inputs[0].ndim
params = ["uint32", gpuarray.GpuArray, "uint32"]
params.extend("uint32" for _ in range(nd))
params.append(gpuarray.GpuArray)
params.append("uint32")
params.extend("int32" for _ in range(nd))
acc_dtype = getattr(self, "acc_dtype", None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
return [
Kernel(
code=src,
name="reduk",
params=params,
flags=Kernel.get_flags(
node.inputs[0].type.dtype, acc_dtype, node.outputs[0].type.dtype
),
objvar="k_reduk_" + name,
)
]
def c_code(self, node, name, inp, out, sub):
if not any(getattr(self, "redux", [node.inputs[0].ndim != 0])):
# We special case the no-reduction case since the gpu
# kernel has trouble handling it.
return """
Py_XDECREF(%(out)s);
%(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER);
if (!%(out)s) {
%(fail)s
}
""" % dict(
out=out[0], inp=inp[0], fail=sub["fail"]
)
k = self.get_kernel_cache(node)
_, src, _, ls = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
acc_dtype = getattr(self, "acc_dtype", None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
input = inp[0]
output = out[0]
nd_out = node.outputs[0].ndim
code = """
size_t gs = 1;
size_t ls;
unsigned int n = 1;
unsigned int proxy_dim[%(nd_in)s];
unsigned int proxy_off;
int proxy_str[%(nd_in)s];
void *args[%(n_args)s];
PyGpuArrayObject *tmp;
int err;
""" % dict(
n_args=4 + (node.inputs[0].ndim * 2), nd_in=node.inputs[0].ndim
)
if nd_out != 0:
code += """
size_t out_dims[%(nd_out)s];
int need_out = %(output)s == NULL || %(output)s->ga.nd != %(nd_out)s;
""" % dict(
nd_out=nd_out, output=output
)
j = 0
for i in range(node.inputs[0].ndim):
if not self.redux[i]:
code += """
out_dims[%(j)s] = %(input)s->ga.dimensions[%(i)s];
if (!need_out)
need_out |= %(output)s->ga.dimensions[%(j)s] != out_dims[%(j)s];
""" % dict(
j=j, i=i, input=input, output=output
)
j += 1
code += """
if (need_out) {
%(output)s = pygpu_empty(%(nd_out)s, out_dims, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(
output=output,
nd_out=nd_out,
fail=sub["fail"],
ctx=sub["params"],
out_type=dtype_to_typecode(node.outputs[0].type.dtype),
)
else:
code += """
if (%(output)s == NULL || %(output)s->ga.nd != 0) {
Py_XDECREF(%(output)s);
%(output)s = pygpu_empty(0, NULL, %(out_type)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(
output=output,
fail=sub["fail"],
ctx=sub["params"],
out_type=dtype_to_typecode(node.outputs[0].type.dtype),
)
if acc_dtype != node.outputs[0].type.dtype:
code += """
tmp = pygpu_empty(%(output)s->ga.nd, %(output)s->ga.dimensions,
%(acc_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!tmp) %(fail)s
""" % dict(
output=output,
fail=sub["fail"],
ctx=sub["params"],
acc_type=dtype_to_typecode(acc_dtype),
)
else:
code += f"""
tmp = {output};
Py_INCREF(tmp);
"""
# We need the proxies since we are passing a pointer to the
# data into the call and therefore we need a real copy of the
# data in the proper type.
code += """
args[0] = &n;
args[1] = tmp->ga.data;
args[2] = &tmp->ga.offset;
""" % dict(
output=output
)
p = 3
for i in range(node.inputs[0].ndim):
code += """
proxy_dim[%(i)s] = %(input)s->ga.dimensions[%(i)s];
args[%(p)s] = &proxy_dim[%(i)s];
n *= %(input)s->ga.dimensions[%(i)s];
""" % dict(
i=i, p=p, input=input
)
p += 1
if not redux[i]:
code += "gs *= %(input)s->ga.dimensions[%(i)s];" % dict(
input=input, i=i
)
code += """
args[%(p)s] = %(input)s->ga.data;
proxy_off = %(input)s->ga.offset;
args[%(p)s+1] = &proxy_off;
""" % dict(
p=p, input=input
)
p += 2
for i in range(node.inputs[0].ndim):
code += """
proxy_str[%(i)s] = %(input)s->ga.strides[%(i)s];
args[%(p)s] = &proxy_str[%(i)s];
""" % dict(
p=p, i=i, input=input
)
p += 1
code += """
if (gs == 0) gs = 1;
n /= gs;
ls = %(ls)s;
err = GpuKernel_call(&%(k_var)s, 1, &gs, &ls, 0, args);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s
}
if (%(cast_out)d) {
err = GpuArray_move(&%(output)s->ga, &tmp->ga);
Py_XDECREF(tmp);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY [cast]: %%s.",
GpuArray_error(&tmp->ga, err));
%(fail)s
}
} else {
Py_XDECREF(%(output)s);
%(output)s = tmp;
}
""" % dict(
k_var="k_reduk_" + name,
ls=ls,
fail=sub["fail"],
output=output,
input=input,
cast_out=bool(acc_dtype != node.outputs[0].type.dtype),
)
return code
def c_code_cache_version_apply(self, node):
return (4, self.kernel_version(node))
def generate_kernel(self, node, odtype, redux):
if isinstance(self.scalar_op, scalar.basic.Add):
reduce_expr = "a + b"
elif isinstance(self.scalar_op, scalar.basic.Mul):
reduce_expr = "a * b"
else:
raise NotImplementedError()
return ReductionKernel(
node.inputs[0].type.context,
odtype,
self.scalar_op.identity,
reduce_expr,
redux,
arguments=[make_argument(node.inputs[0], "a")],
init_nd=node.inputs[0].ndim,
)
def perform(self, node, inp, out, ctx):
(input,) = inp
(output,) = out
if self.axis is None:
redux = [True] * input.ndim
else:
redux = self.redux
if any(redux):
output[0] = self.get_kernel_cache(node)(input).astype(
copy=False, dtype=node.outputs[0].type.dtype
)
else:
output[0] = pygpu.gpuarray.array(
input, copy=True, dtype=node.outputs[0].type.dtype, context=ctx
)
# To allow reloading old pickled files
GpuCAReduce = GpuCAReduceCPY
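# A small, self-contained sketch (hypothetical helper, not part of the library)
# of how the Op above turns `axis` into the boolean `redux` mask and how the
# output shape keeps only the non-reduced dimensions:
def redux_mask_and_out_shape(in_shape, axis):
    # axis=None reduces every dimension, mirroring `redux = [True] * ndim`
    if axis is None:
        redux = [True] * len(in_shape)
    else:
        axes = (axis,) if isinstance(axis, int) else tuple(axis)
        redux = [i in axes for i in range(len(in_shape))]
    # the output keeps only the dimensions that are not reduced over
    out_shape = tuple(d for d, r in zip(in_shape, redux) if not r)
    return redux, out_shape
# Reducing axis 1 of a (4, 5, 6) input keeps dimensions 0 and 2:
assert redux_mask_and_out_shape((4, 5, 6), 1) == ([False, True, False], (4, 6))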
| 35.17429
| 117
| 0.458286
|
7953126f52b2dd2a836c29dfc8938147228ef5ae
| 2,336
|
py
|
Python
|
savecode/threeyears/idownserver/taskbackdealer/taskbackmanager.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2019-05-19T11:54:26.000Z
|
2019-05-19T12:03:49.000Z
|
savecode/threeyears/idownserver/taskbackdealer/taskbackmanager.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 1
|
2020-11-27T07:55:15.000Z
|
2020-11-27T07:55:15.000Z
|
savecode/threeyears/idownserver/taskbackdealer/taskbackmanager.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2021-09-06T18:06:12.000Z
|
2021-12-31T07:44:43.000Z
|
"""task dispath manager"""
# -*- coding:utf-8 -*-
import traceback
from datacontract import DataMatcher, InputData
from ..config_taskbackdeal import taskbackconfig
from ..servicemanager import DealerBase
from .taskbackdealerbase import TaskBackDealerBase
class TaskBackManager(DealerBase):
"""dispatch tasks"""
def __init__(self, datamatcher: DataMatcher):
DealerBase.__init__(self, datamatcher)
        # Task-data dealers, loaded from configuration
self._dealers: dict = taskbackconfig._dealers
def _start(self):
"""启动数据分配/状态解析线程"""
for disp in self._dealers.items():
disp[1].start()
def _deal_data(self, data: InputData) -> bool:
res: bool = False
try:
            # If the dealer returns None, it handles the completion of the data
            # internally, so data.on_complete() must not be called here.
            res = self._to_dealer(data)
            if res is False:
                self._logger.error("Unrecognized data: %s" % data._source)
                data.on_complete(False)
        except Exception:
            self._logger.error("Error while handling incoming data:\ndata: %s\nerror: %s" %
                               (data, traceback.format_exc()))
            if data is not None:
                data.on_complete(False)
return res
def _to_dealer(self, data: InputData) -> bool:
"""按分配器关联的数据源,将任务分配到任务分配器"""
res: bool = None
try:
matched: bool = False
            # Returning None means the dealer handles the task's completion state itself.
for dealer in self._dealers.values():
dealer: TaskBackDealerBase = dealer
if dealer._datamatcher.match_data(data):
matched = True
dealer.on_data_in(data)
elif isinstance(
dealer._relation_inputer_src, list
) and data._srcmark in dealer._relation_inputer_src:
                    # Pick up every dealer explicitly configured for this data source.
matched = True
dealer.on_data_in(data)
if not matched:
self._logger.error("No dealer matches data: {}".format(
data.name))
data.on_complete(False)
except Exception:
self._logger.error("Task allocate to dispatcher error: %s" %
traceback.format_exc())
res = False
return res
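# The matcher-based dispatch loop above can be illustrated with a
# self-contained sketch; every name below is hypothetical and only mirrors
# the control flow (match each item against every dealer, report whether at
# least one accepted it), it is not part of this project's API:
class _DemoDealer:
    def __init__(self, accepted_sources):
        self.accepted_sources = set(accepted_sources)
        self.received = []
    def match(self, item):
        return item.get("source") in self.accepted_sources
    def on_data_in(self, item):
        self.received.append(item)
def _demo_dispatch(item, dealers):
    matched = False
    for dealer in dealers:
        if dealer.match(item):
            matched = True
            dealer.on_data_in(item)
    return matched
_demo_dealers = [_DemoDealer({"crawler"}), _DemoDealer({"api", "crawler"})]
assert _demo_dispatch({"source": "crawler", "payload": 1}, _demo_dealers) is True
assert _demo_dispatch({"source": "unknown"}, _demo_dealers) is False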
| 31.567568
| 74
| 0.559932
|
795312861c9e7f6c44564cd005aacadce888ba2e
| 2,978
|
py
|
Python
|
matminer/featurizers/site/tests/test_bonding.py
|
ncfrey/matminer
|
5a688de8f2c7eaf5109d34d58ab7875cfe980e48
|
[
"BSD-3-Clause-LBNL"
] | 326
|
2017-01-26T00:12:27.000Z
|
2022-03-22T15:07:31.000Z
|
matminer/featurizers/site/tests/test_bonding.py
|
ncfrey/matminer
|
5a688de8f2c7eaf5109d34d58ab7875cfe980e48
|
[
"BSD-3-Clause-LBNL"
] | 578
|
2017-01-02T23:57:11.000Z
|
2022-03-31T13:01:31.000Z
|
matminer/featurizers/site/tests/test_bonding.py
|
ncfrey/matminer
|
5a688de8f2c7eaf5109d34d58ab7875cfe980e48
|
[
"BSD-3-Clause-LBNL"
] | 182
|
2017-01-12T18:45:26.000Z
|
2022-03-24T15:03:54.000Z
|
import unittest
import numpy as np
from pymatgen.analysis.local_env import VoronoiNN, CrystalNN
from matminer.featurizers.site.bonding import (
BondOrientationalParameter,
AverageBondLength,
AverageBondAngle,
)
from matminer.featurizers.site.tests.base import SiteFeaturizerTest
class BondingTest(SiteFeaturizerTest):
def test_bop(self):
f = BondOrientationalParameter(max_l=10, compute_w=True, compute_w_hat=True)
# Check the feature count
self.assertEqual(30, len(f.feature_labels()))
self.assertEqual(30, len(f.featurize(self.sc, 0)))
f.compute_W = False
self.assertEqual(20, len(f.feature_labels()))
self.assertEqual(20, len(f.featurize(self.sc, 0)))
f.compute_What = False
self.assertEqual(10, len(f.featurize(self.sc, 0)))
self.assertEqual(10, len(f.featurize(self.sc, 0)))
f.compute_W = f.compute_What = True
# Compute it for SC and B1
sc_features = f.featurize(self.sc, 0)
b1_features = f.featurize(self.b1, 0)
# They should be equal
self.assertArrayAlmostEqual(sc_features, b1_features)
# Comparing Q's to results from https://aip.scitation.org/doi/10.1063/1.4774084
self.assertArrayAlmostEqual([0, 0, 0, 0.764, 0, 0.354, 0, 0.718, 0, 0.411], sc_features[:10], decimal=3)
# Comparing W's to results from https://link.aps.org/doi/10.1103/PhysRevB.28.784
self.assertArrayAlmostEqual(
[0, 0, 0, 0.043022, 0, 0.000612, 0, 0.034055, 0, 0.013560],
sc_features[10:20],
decimal=3,
)
self.assertArrayAlmostEqual(
[0, 0, 0, 0.159317, 0, 0.013161, 0, 0.058455, 0, 0.090130],
sc_features[20:],
decimal=3,
)
def test_AverageBondLength(self):
ft = AverageBondLength(VoronoiNN())
self.assertAlmostEqual(ft.featurize(self.sc, 0)[0], 3.52)
for i in range(len(self.cscl.sites)):
self.assertAlmostEqual(ft.featurize(self.cscl, i)[0], 3.758562645051973)
for i in range(len(self.b1.sites)):
self.assertAlmostEqual(ft.featurize(self.b1, i)[0], 1.0)
ft = AverageBondLength(CrystalNN())
for i in range(len(self.cscl.sites)):
self.assertAlmostEqual(ft.featurize(self.cscl, i)[0], 3.649153279231275)
def test_AverageBondAngle(self):
ft = AverageBondAngle(VoronoiNN())
self.assertAlmostEqual(ft.featurize(self.sc, 0)[0], np.pi / 2)
for i in range(len(self.cscl.sites)):
self.assertAlmostEqual(ft.featurize(self.cscl, i)[0], 0.9289637531152273)
for i in range(len(self.b1.sites)):
self.assertAlmostEqual(ft.featurize(self.b1, i)[0], np.pi / 2)
ft = AverageBondAngle(CrystalNN())
for i in range(len(self.b1.sites)):
self.assertAlmostEqual(ft.featurize(self.b1, i)[0], np.pi / 2)
if __name__ == "__main__":
unittest.main()
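# A minimal standalone usage sketch of the featurizers exercised above.  It
# assumes a recent pymatgen exposing `pymatgen.core`; the aluminium
# simple-cubic structure is only an illustrative stand-in for `self.sc`:
from pymatgen.core import Lattice, Structure
from pymatgen.analysis.local_env import VoronoiNN
from matminer.featurizers.site.bonding import AverageBondLength, BondOrientationalParameter
sc = Structure(Lattice.cubic(3.52), ["Al"], [[0.0, 0.0, 0.0]])
bop = BondOrientationalParameter(max_l=10, compute_w=True, compute_w_hat=True)
print(bop.featurize(sc, 0))                             # 30 Q/W/W-hat features for site 0
print(AverageBondLength(VoronoiNN()).featurize(sc, 0))  # approximately [3.52]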
| 34.229885
| 112
| 0.636333
|
7953166c979acc1b82a4a36e3f9f9393c433dccb
| 25,833
|
py
|
Python
|
python/ccxt/bibox.py
|
ZacharyATanenbaum/ccxt
|
539a9e83dd2ca6a547103fd5910e0e776af5a48a
|
[
"MIT"
] | 2
|
2020-02-25T22:50:00.000Z
|
2020-09-11T00:24:45.000Z
|
python/ccxt/bibox.py
|
ZacharyATanenbaum/ccxt
|
539a9e83dd2ca6a547103fd5910e0e776af5a48a
|
[
"MIT"
] | 8
|
2018-09-04T05:28:15.000Z
|
2018-12-21T08:10:35.000Z
|
python/ccxt/bibox.py
|
ZacharyATanenbaum/ccxt
|
539a9e83dd2ca6a547103fd5910e0e776af5a48a
|
[
"MIT"
] | 1
|
2022-03-15T23:30:16.000Z
|
2022-03-15T23:30:16.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
class bibox (Exchange):
def describe(self):
return self.deep_extend(super(bibox, self).describe(), {
'id': 'bibox',
'name': 'Bibox',
'countries': ['CN', 'US', 'KR'],
'version': 'v1',
'has': {
'CORS': False,
'publicAPI': False,
'fetchBalance': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchFundingFees': True,
'fetchTickers': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'createMarketOrder': False, # or they will return https://github.com/ccxt/ccxt/issues/2338
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'12h': '12hour',
'1d': 'day',
'1w': 'week',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/34902611-2be8bf1a-f830-11e7-91a2-11b2f292e750.jpg',
'api': 'https://api.bibox.com',
'www': 'https://www.bibox.com',
'doc': [
'https://github.com/Biboxcom/api_reference/wiki/home_en',
'https://github.com/Biboxcom/api_reference/wiki/api_reference',
],
'fees': 'https://bibox.zendesk.com/hc/en-us/articles/115004417013-Fee-Structure-on-Bibox',
'referral': 'https://www.bibox.com/signPage?id=11114745&lang=en',
},
'api': {
'public': {
'post': [
# TODO: rework for full endpoint/cmd paths here
'mdata',
],
'get': [
'mdata',
],
},
'private': {
'post': [
'user',
'orderpending',
'transfer',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.001,
'maker': 0.0,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'exceptions': {
'2021': InsufficientFunds, # Insufficient balance available for withdrawal
'2015': AuthenticationError, # Google authenticator is wrong
'2027': InsufficientFunds, # Insufficient balance available(for trade)
                '2033': OrderNotFound,  # operation failed! Orders have been completed or revoked
'2067': InvalidOrder, # Does not support market orders
'2068': InvalidOrder, # The number of orders can not be less than
'3012': AuthenticationError, # invalid apiKey
'3024': PermissionDenied, # wrong apikey permissions
'3025': AuthenticationError, # signature failed
'4000': ExchangeNotAvailable, # current network is unstable
'4003': DDoSProtection, # server busy please try again later
},
'commonCurrencies': {
'KEY': 'Bihu',
'PAI': 'PCHAIN',
},
})
def fetch_markets(self, params={}):
response = self.publicGetMdata(self.extend({
'cmd': 'marketAll',
}, params))
markets = response['result']
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = market['coin_symbol']
quoteId = market['currency_symbol']
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
id = base + '_' + quote
precision = {
'amount': 4,
'price': 8,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': base,
'quoteId': quote,
'active': True,
'info': market,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': None,
},
'price': {
'min': None,
'max': None,
},
},
})
return result
def parse_ticker(self, ticker, market=None):
# we don't set values that are not defined by the exchange
timestamp = self.safe_integer(ticker, 'timestamp')
symbol = None
if market is not None:
symbol = market['symbol']
else:
base = ticker['coin_symbol']
quote = ticker['currency_symbol']
symbol = self.common_currency_code(base) + '/' + self.common_currency_code(quote)
last = self.safe_float(ticker, 'last')
change = self.safe_float(ticker, 'change')
baseVolume = None
if 'vol' in ticker:
baseVolume = self.safe_float(ticker, 'vol')
else:
baseVolume = self.safe_float(ticker, 'vol24H')
open = None
if (last is not None) and(change is not None):
open = last - change
iso8601 = None
if timestamp is not None:
iso8601 = self.iso8601(timestamp)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': iso8601,
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': self.safe_string(ticker, 'percent'),
'average': None,
'baseVolume': baseVolume,
'quoteVolume': self.safe_float(ticker, 'amount'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMdata(self.extend({
'cmd': 'ticker',
'pair': market['id'],
}, params))
return self.parse_ticker(response['result'], market)
def parse_tickers(self, rawTickers, symbols=None):
tickers = []
for i in range(0, len(rawTickers)):
tickers.append(self.parse_ticker(rawTickers[i]))
return self.filter_by_array(tickers, 'symbol', symbols)
def fetch_tickers(self, symbols=None, params={}):
response = self.publicGetMdata(self.extend({
'cmd': 'marketAll',
}, params))
return self.parse_tickers(response['result'], symbols)
def parse_trade(self, trade, market=None):
timestamp = self.safe_integer(trade, 'time')
timestamp = self.safe_integer(trade, 'createdAt', timestamp)
side = self.safe_integer(trade, 'side')
side = self.safe_integer(trade, 'order_side', side)
side = 'buy' if (side == 1) else 'sell'
symbol = None
if market is None:
marketId = self.safe_string(trade, 'pair')
if marketId is None:
baseId = self.safe_string(trade, 'coin_symbol')
quoteId = self.safe_string(trade, 'currency_symbol')
if (baseId is not None) and(quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
fee = None
feeCost = self.safe_float(trade, 'fee')
feeCurrency = self.safe_string(trade, 'fee_symbol')
if feeCurrency is not None:
if feeCurrency in self.currencies_by_id:
feeCurrency = self.currencies_by_id[feeCurrency]['code']
else:
feeCurrency = self.common_currency_code(feeCurrency)
feeRate = None # todo: deduce from market if market is defined
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = price * amount
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
}
return {
'info': trade,
'id': self.safe_string(trade, 'id'),
            'order': None,  # Bibox does not have it (documented) yet
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': 'limit',
'takerOrMaker': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
size = limit if (limit) else 200
response = self.publicGetMdata(self.extend({
'cmd': 'deals',
'pair': market['id'],
'size': size,
}, params))
return self.parse_trades(response['result'], market, since, limit)
def fetch_order_book(self, symbol, limit=200, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'cmd': 'depth',
'pair': market['id'],
}
request['size'] = limit # default = 200 ?
response = self.publicGetMdata(self.extend(request, params))
return self.parse_order_book(response['result'], self.safe_float(response['result'], 'update_time'), 'bids', 'asks', 'price', 'volume')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv['time'],
ohlcv['open'],
ohlcv['high'],
ohlcv['low'],
ohlcv['close'],
ohlcv['vol'],
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMdata(self.extend({
'cmd': 'kline',
'pair': market['id'],
'period': self.timeframes[timeframe],
'size': limit,
}, params))
return self.parse_ohlcvs(response['result'], market, timeframe, since, limit)
def fetch_currencies(self, params={}):
response = self.privatePostTransfer({
'cmd': 'transfer/coinList',
'body': {},
})
currencies = response['result']
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = currency['symbol']
code = self.common_currency_code(id)
precision = 8
deposit = currency['enable_deposit']
withdraw = currency['enable_withdraw']
active = True if (deposit and withdraw) else False
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': currency['name'],
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': math.pow(10, precision),
},
},
}
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostTransfer({
'cmd': 'transfer/assets',
'body': self.extend({
'select': 1,
}, params),
})
balances = response['result']
result = {'info': balances}
indexed = None
if 'assets_list' in balances:
indexed = self.index_by(balances['assets_list'], 'coin_symbol')
else:
indexed = balances
keys = list(indexed.keys())
for i in range(0, len(keys)):
id = keys[i]
code = id.upper()
if code.find('TOTAL_') >= 0:
code = code[6:]
if code in self.currencies_by_id:
code = self.currencies_by_id[code]['code']
account = self.account()
balance = indexed[id]
if isinstance(balance, basestring):
balance = float(balance)
account['free'] = balance
account['used'] = 0.0
account['total'] = balance
else:
account['free'] = float(balance['balance'])
account['used'] = float(balance['freeze'])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
orderType = 2 if (type == 'limit') else 1
orderSide = 1 if (side == 'buy') else 2
response = self.privatePostOrderpending({
'cmd': 'orderpending/trade',
'body': self.extend({
'pair': market['id'],
'account_type': 0,
'order_type': orderType,
'order_side': orderSide,
'pay_bix': 0,
'amount': amount,
'price': price,
}, params),
})
return {
'info': response,
'id': self.safe_string(response, 'result'),
}
def cancel_order(self, id, symbol=None, params={}):
response = self.privatePostOrderpending({
'cmd': 'orderpending/cancelTrade',
'body': self.extend({
'orders_id': id,
}, params),
})
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privatePostOrderpending({
'cmd': 'orderpending/order',
'body': self.extend({
'id': id,
}, params),
})
order = self.safe_value(response, 'result')
if self.is_empty(order):
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return self.parse_order(order)
def parse_order(self, order, market=None):
symbol = None
if market is None:
marketId = None
baseId = self.safe_string(order, 'coin_symbol')
quoteId = self.safe_string(order, 'currency_symbol')
if (baseId is not None) and(quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
type = 'market' if (order['order_type'] == 1) else 'limit'
timestamp = order['createdAt']
price = self.safe_float(order, 'price')
average = self.safe_float(order, 'deal_price')
filled = self.safe_float(order, 'deal_amount')
amount = self.safe_float(order, 'amount')
cost = self.safe_float_2(order, 'deal_money', 'money')
remaining = None
if filled is not None:
if amount is not None:
remaining = amount - filled
if cost is None:
cost = price * filled
side = 'buy' if (order['order_side'] == 1) else 'sell'
status = self.safe_string(order, 'status')
if status is not None:
status = self.parse_order_status(status)
result = {
'info': order,
'id': self.safe_string(order, 'id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost if cost else float(price) * filled,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': self.safe_float(order, 'fee'),
}
return result
def parse_order_status(self, status):
statuses = {
# original comments from bibox:
'1': 'open', # pending
'2': 'open', # part completed
'3': 'closed', # completed
'4': 'canceled', # part canceled
'5': 'canceled', # canceled
'6': 'canceled', # canceling
}
return self.safe_string(statuses, status, status.lower())
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
market = None
pair = None
if symbol is not None:
self.load_markets()
market = self.market(symbol)
pair = market['id']
size = limit if (limit) else 200
response = self.privatePostOrderpending({
'cmd': 'orderpending/orderPendingList',
'body': self.extend({
'pair': pair,
'account_type': 0, # 0 - regular, 1 - margin
'page': 1,
'size': size,
}, params),
})
orders = self.safe_value(response['result'], 'items', [])
return self.parse_orders(orders, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=200, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchClosedOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
response = self.privatePostOrderpending({
'cmd': 'orderpending/pendingHistoryList',
'body': self.extend({
'pair': market['id'],
'account_type': 0, # 0 - regular, 1 - margin
'page': 1,
'size': limit,
}, params),
})
orders = self.safe_value(response['result'], 'items', [])
return self.parse_orders(orders, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchMyTrades requires a symbol argument')
self.load_markets()
market = self.market(symbol)
size = limit if (limit) else 200
response = self.privatePostOrderpending({
'cmd': 'orderpending/orderHistoryList',
'body': self.extend({
'pair': market['id'],
'account_type': 0, # 0 - regular, 1 - margin
'page': 1,
'size': size,
}, params),
})
trades = self.safe_value(response['result'], 'items', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
response = self.privatePostTransfer({
'cmd': 'transfer/transferIn',
'body': self.extend({
'coin_symbol': currency['id'],
}, params),
})
address = self.safe_string(response, 'result')
        tag = None  # todo: figure this out
result = {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
return result
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if self.password is None:
if not('trade_pwd' in list(params.keys())):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a trade_pwd parameter')
if not('totp_code' in list(params.keys())):
raise ExchangeError(self.id + ' withdraw() requires a totp_code parameter for 2FA authentication')
body = {
'trade_pwd': self.password,
'coin_symbol': currency['id'],
'amount': amount,
'addr': address,
}
if tag is not None:
body['address_remark'] = tag
response = self.privatePostTransfer({
'cmd': 'transfer/transferOut',
'body': self.extend(body, params),
})
return {
'info': response,
'id': None,
}
def fetch_funding_fees(self, codes=None, params={}):
        # by default it will try to load the withdrawal fees of all currencies (with separate requests)
        # however, if you define codes = ['ETH', 'BTC'] in the args it will only load those
self.load_markets()
withdrawFees = {}
info = {}
if codes is None:
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currency(code)
response = self.privatePostTransfer({
'cmd': 'transfer/transferOutInfo',
'body': self.extend({
'coin_symbol': currency['id'],
}, params),
})
info[code] = response
withdrawFees[code] = response['result']['withdraw_fee']
return {
'info': info,
'withdraw': withdrawFees,
'deposit': {},
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + path
cmds = self.json([params])
if api == 'public':
if method != 'GET':
body = {'cmds': cmds}
elif params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
body = {
'cmds': cmds,
'apikey': self.apiKey,
'sign': self.hmac(self.encode(cmds), self.encode(self.secret), hashlib.md5),
}
if body is not None:
body = self.json(body, {'convertArraysToObjects': True})
headers = {'Content-Type': 'application/json'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body):
if len(body) > 0:
if body[0] == '{':
response = json.loads(body)
if 'error' in response:
if 'code' in response['error']:
code = self.safe_string(response['error'], 'code')
feedback = self.id + ' ' + body
exceptions = self.exceptions
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
raise ExchangeError(self.id + ': "error" in response: ' + body)
if not('result' in list(response.keys())):
raise ExchangeError(self.id + ' ' + body)
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if method == 'GET':
return response
else:
return response['result'][0]
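# A hedged usage sketch of this exchange class through the ccxt package
# (public endpoints only; requires network access, and the market chosen
# below is simply whichever symbol load_markets() returns first):
import ccxt
exchange = ccxt.bibox()
markets = exchange.load_markets()
symbol = next(iter(markets))
print(len(markets), "markets loaded")
print(exchange.fetch_ticker(symbol))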
| 37.989706
| 143
| 0.496187
|
79531698bcf5d0e7c61489b602808abdd6fccd1b
| 1,159
|
py
|
Python
|
solutions/03_01.py
|
glemaitre/IBIOM-M2-deep-learning
|
001bf7834e57a7357326087d31049fc91ab8967f
|
[
"MIT"
] | null | null | null |
solutions/03_01.py
|
glemaitre/IBIOM-M2-deep-learning
|
001bf7834e57a7357326087d31049fc91ab8967f
|
[
"MIT"
] | null | null | null |
solutions/03_01.py
|
glemaitre/IBIOM-M2-deep-learning
|
001bf7834e57a7357326087d31049fc91ab8967f
|
[
"MIT"
] | null | null | null |
# NOTE: this solution cell assumes the notebook's `sample_image` array is
# already loaded; the imports below are added so the snippet is otherwise
# self-contained (assuming a tf.keras setup).
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
def my_init(shape=(5, 5, 3, 3), dtype=None):
array = np.zeros(shape=shape)
array[2, 2] = np.eye(3)
return array
conv_strides_same = Sequential([
Conv2D(filters=3, kernel_size=5, strides=2,
padding="same", kernel_initializer=my_init,
input_shape=(None, None, 3))
])
conv_strides_valid = Sequential([
Conv2D(filters=3, kernel_size=5, strides=2,
padding="valid", kernel_initializer=my_init,
input_shape=(None, None, 3))
])
img_in = np.expand_dims(sample_image, 0)
img_out_same = conv_strides_same.predict(img_in)
img_out_valid = conv_strides_valid.predict(img_in)
print("Shape of result with SAME padding:", img_out_same.shape)
print("Shape of result with VALID padding:", img_out_valid.shape)
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(12, 4))
ax0.imshow(img_in[0].astype(np.uint8))
ax1.imshow(img_out_same[0].astype(np.uint8))
ax2.imshow(img_out_valid[0].astype(np.uint8))
# We observe that the stride divides the size of the image by 2.
# In the case of 'VALID' padding mode, no padding is added, so the output
# image is slightly smaller again: the kernel cannot be centred on the
# border pixels.
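# The printed shapes follow the standard stride/padding formulas; a small
# pure-Python helper (no framework needed) makes the SAME vs VALID
# difference explicit:
import math
def conv_output_size(size, kernel, stride, padding):
    if padding == "same":
        return math.ceil(size / stride)
    if padding == "valid":
        return math.floor((size - kernel) / stride) + 1
    raise ValueError(padding)
# e.g. a 5x5 kernel with stride 2 along a 100-pixel axis:
print(conv_output_size(100, 5, 2, "same"))   # 50
print(conv_output_size(100, 5, 2, "valid"))  # 48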
| 34.088235
| 65
| 0.709232
|
795317d48d79f204ed3aa3c6aaee613f07389742
| 87
|
py
|
Python
|
run1.py
|
kaaiian/use_roost
|
b7a53a4e79c0d2d3a9a575493cf215d23cacab8f
|
[
"MIT"
] | null | null | null |
run1.py
|
kaaiian/use_roost
|
b7a53a4e79c0d2d3a9a575493cf215d23cacab8f
|
[
"MIT"
] | null | null | null |
run1.py
|
kaaiian/use_roost
|
b7a53a4e79c0d2d3a9a575493cf215d23cacab8f
|
[
"MIT"
] | null | null | null |
from use_roost__learn_on_aflow_script import run
if __name__ == '__main__':
run(1)
| 21.75
| 48
| 0.770115
|
7953192a28b8d231a4bf96e92675d53b2f5558f2
| 55
|
py
|
Python
|
camd/experiment/__init__.py
|
MuratAykol-TRI/CAMD-1
|
ed331ad6706f36f092739152a06f156ed87fdae2
|
[
"Apache-2.0"
] | null | null | null |
camd/experiment/__init__.py
|
MuratAykol-TRI/CAMD-1
|
ed331ad6706f36f092739152a06f156ed87fdae2
|
[
"Apache-2.0"
] | null | null | null |
camd/experiment/__init__.py
|
MuratAykol-TRI/CAMD-1
|
ed331ad6706f36f092739152a06f156ed87fdae2
|
[
"Apache-2.0"
] | null | null | null |
from camd.experiment.base import Experiment, ATFSampler
| 55
| 55
| 0.872727
|
7953197eead256ee4529ed71c3b1094b902f437a
| 22,495
|
py
|
Python
|
tables/scripts/ptrepack.py
|
robbmcleod/PyTables
|
7a3181f4fbdbbb4a0786f17df6e3126b7581fc91
|
[
"BSD-3-Clause"
] | null | null | null |
tables/scripts/ptrepack.py
|
robbmcleod/PyTables
|
7a3181f4fbdbbb4a0786f17df6e3126b7581fc91
|
[
"BSD-3-Clause"
] | null | null | null |
tables/scripts/ptrepack.py
|
robbmcleod/PyTables
|
7a3181f4fbdbbb4a0786f17df6e3126b7581fc91
|
[
"BSD-3-Clause"
] | 1
|
2020-05-26T08:28:03.000Z
|
2020-05-26T08:28:03.000Z
|
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: February 10, 2004
# Author: Francesc Alted - faltet@pytables.com
#
# $Id$
#
########################################################################
"""This utility lets you repack your data files in a flexible way.
Pass the flag -h to this for help on usage.
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import time
import os.path
import argparse
import warnings
try:
from time import process_time as cputime
except ImportError:
from time import clock as cputime
from tables.file import open_file
from tables.group import Group
from tables.leaf import Filters
from tables.flavor import internal_flavor
from tables.exceptions import OldIndexWarning, NoSuchNodeError, FlavorWarning
# Global variables
verbose = False
regoldindexes = True
createsysattrs = True
numpy_aliases = [
'numeric',
'Numeric',
'numarray',
'NumArray',
'CharArray',
]
def newdst_group(dstfileh, dstgroup, title, filters):
group = dstfileh.root
# Now, create the new group. This works even if dstgroup == '/'
for nodename in dstgroup.split('/'):
if nodename == '':
continue
# First try if possible intermediate groups does already exist.
try:
group2 = dstfileh.get_node(group, nodename)
except NoSuchNodeError:
# The group does not exist. Create it.
group2 = dstfileh.create_group(group, nodename,
title=title,
filters=filters)
group = group2
return group
def recreate_indexes(table, dstfileh, dsttable):
listoldindexes = table._listoldindexes
if listoldindexes != []:
if not regoldindexes:
if verbose:
print("[I]Not regenerating indexes for table: '%s:%s'" %
(dstfileh.filename, dsttable._v_pathname))
return
# Now, recreate the indexed columns
if verbose:
print("[I]Regenerating indexes for table: '%s:%s'" %
(dstfileh.filename, dsttable._v_pathname))
for colname in listoldindexes:
if verbose:
print("[I]Indexing column: '%s'. Please wait..." % colname)
colobj = dsttable.cols._f_col(colname)
# We don't specify the filters for the indexes
colobj.create_index(filters=None)
def copy_leaf(srcfile, dstfile, srcnode, dstnode, title,
filters, copyuserattrs, overwritefile, overwrtnodes, stats,
start, stop, step, chunkshape, sortby, check_CSI,
propindexes, upgradeflavors):
# Open the source file
srcfileh = open_file(srcfile, 'r')
# Get the source node (that should exist)
srcnode = srcfileh.get_node(srcnode)
# Get the destination node and its parent
last_slash = dstnode.rindex('/')
if last_slash == len(dstnode)-1:
# print("Detected a trailing slash in destination node. "
# "Interpreting it as a destination group.")
dstgroup = dstnode[:-1]
elif last_slash > 0:
dstgroup = dstnode[:last_slash]
else:
dstgroup = "/"
dstleaf = dstnode[last_slash + 1:]
if dstleaf == "":
dstleaf = srcnode.name
# Check whether the destination group exists or not
if os.path.isfile(dstfile) and not overwritefile:
dstfileh = open_file(dstfile, 'a', pytables_sys_attrs=createsysattrs)
try:
dstgroup = dstfileh.get_node(dstgroup)
except:
# The dstgroup does not seem to exist. Try creating it.
dstgroup = newdst_group(dstfileh, dstgroup, title, filters)
else:
# The node exists, but it is really a group?
if not isinstance(dstgroup, Group):
# No. Should we overwrite it?
if overwrtnodes:
parent = dstgroup._v_parent
last_slash = dstgroup._v_pathname.rindex('/')
dstgroupname = dstgroup._v_pathname[last_slash + 1:]
dstgroup.remove()
dstgroup = dstfileh.create_group(parent, dstgroupname,
title=title,
filters=filters)
else:
raise RuntimeError("Please check that the node names are "
"not duplicated in destination, and "
"if so, add the --overwrite-nodes "
"flag if desired.")
else:
# The destination file does not exist or will be overwritten.
dstfileh = open_file(dstfile, 'w', title=title, filters=filters,
pytables_sys_attrs=createsysattrs)
dstgroup = newdst_group(dstfileh, dstgroup, title="", filters=filters)
# Finally, copy srcnode to dstnode
try:
dstnode = srcnode.copy(
dstgroup, dstleaf, filters=filters,
copyuserattrs=copyuserattrs, overwrite=overwrtnodes,
stats=stats, start=start, stop=stop, step=step,
chunkshape=chunkshape,
sortby=sortby, check_CSI=check_CSI, propindexes=propindexes)
except:
(type_, value, traceback) = sys.exc_info()
print("Problems doing the copy from '%s:%s' to '%s:%s'" %
(srcfile, srcnode, dstfile, dstnode))
print("The error was --> %s: %s" % (type_, value))
print("The destination file looks like:\n", dstfileh)
# Close all the open files:
srcfileh.close()
dstfileh.close()
raise RuntimeError("Please check that the node names are not "
"duplicated in destination, and if so, add "
"the --overwrite-nodes flag if desired.")
# Upgrade flavors in dstnode, if required
if upgradeflavors:
if srcfileh.format_version.startswith("1"):
# Remove original flavor in case the source file has 1.x format
dstnode.del_attr('FLAVOR')
elif srcfileh.format_version < "2.1":
if dstnode.get_attr('FLAVOR') in numpy_aliases:
dstnode.set_attr('FLAVOR', internal_flavor)
# Recreate possible old indexes in destination node
if srcnode._c_classid == "TABLE":
recreate_indexes(srcnode, dstfileh, dstnode)
# Close all the open files:
srcfileh.close()
dstfileh.close()
def copy_children(srcfile, dstfile, srcgroup, dstgroup, title,
recursive, filters, copyuserattrs, overwritefile,
overwrtnodes, stats, start, stop, step,
chunkshape, sortby, check_CSI, propindexes,
upgradeflavors, use_hardlinks=True):
"""Copy the children from source group to destination group"""
# Open the source file with srcgroup as root_uep
srcfileh = open_file(srcfile, 'r', root_uep=srcgroup)
# Assign the root to srcgroup
srcgroup = srcfileh.root
created_dstgroup = False
# Check whether the destination group exists or not
if os.path.isfile(dstfile) and not overwritefile:
dstfileh = open_file(dstfile, 'a', pytables_sys_attrs=createsysattrs)
try:
dstgroup = dstfileh.get_node(dstgroup)
except NoSuchNodeError:
# The dstgroup does not seem to exist. Try creating it.
dstgroup = newdst_group(dstfileh, dstgroup, title, filters)
created_dstgroup = True
else:
# The node exists, but it is really a group?
if not isinstance(dstgroup, Group):
# No. Should we overwrite it?
if overwrtnodes:
parent = dstgroup._v_parent
last_slash = dstgroup._v_pathname.rindex('/')
dstgroupname = dstgroup._v_pathname[last_slash + 1:]
dstgroup.remove()
dstgroup = dstfileh.create_group(parent, dstgroupname,
title=title,
filters=filters)
else:
raise RuntimeError("Please check that the node names are "
"not duplicated in destination, and "
"if so, add the --overwrite-nodes "
"flag if desired.")
else:
# The destination file does not exist or will be overwritten.
dstfileh = open_file(dstfile, 'w', title=title, filters=filters,
pytables_sys_attrs=createsysattrs)
dstgroup = newdst_group(dstfileh, dstgroup, title="", filters=filters)
created_dstgroup = True
# Copy the attributes to dstgroup, if needed
if created_dstgroup and copyuserattrs:
srcgroup._v_attrs._f_copy(dstgroup)
# Finally, copy srcgroup children to dstgroup
try:
srcgroup._f_copy_children(
dstgroup, recursive=recursive, filters=filters,
copyuserattrs=copyuserattrs, overwrite=overwrtnodes,
stats=stats, start=start, stop=stop, step=step,
chunkshape=chunkshape,
sortby=sortby, check_CSI=check_CSI, propindexes=propindexes,
use_hardlinks=use_hardlinks)
except:
(type_, value, traceback) = sys.exc_info()
print("Problems doing the copy from '%s:%s' to '%s:%s'" %
(srcfile, srcgroup, dstfile, dstgroup))
print("The error was --> %s: %s" % (type_, value))
print("The destination file looks like:\n", dstfileh)
# Close all the open files:
srcfileh.close()
dstfileh.close()
raise RuntimeError("Please check that the node names are not "
"duplicated in destination, and if so, add the "
"--overwrite-nodes flag if desired. In "
"particular, pay attention that root_uep is not "
"fooling you.")
# Upgrade flavors in dstnode, if required
if upgradeflavors:
for dstnode in dstgroup._f_walknodes("Leaf"):
if srcfileh.format_version.startswith("1"):
# Remove original flavor in case the source file has 1.x format
dstnode.del_attr('FLAVOR')
elif srcfileh.format_version < "2.1":
if dstnode.get_attr('FLAVOR') in numpy_aliases:
dstnode.set_attr('FLAVOR', internal_flavor)
# Convert the remaining tables with old indexes (if any)
for table in srcgroup._f_walknodes("Table"):
dsttable = dstfileh.get_node(dstgroup, table._v_pathname)
recreate_indexes(table, dstfileh, dsttable)
# Close all the open files:
srcfileh.close()
dstfileh.close()
def _get_parser():
parser = argparse.ArgumentParser(
description='''This utility is very powerful and lets you copy any
leaf, group or complete subtree into another file.
During the copy process you are allowed to change the filter
        properties if you wish to. Also, in the case of duplicated pathnames,
        you can decide if you want to overwrite already existing nodes on the
        destination file. Generally speaking, ptrepack can be useful in many
        situations, like replicating a subtree in another file, changing the
        filters of objects to see how this affects the compression degree
        or the I/O performance, consolidating specific data in repositories or
        even *importing* generic HDF5 files to create true PyTables
        counterparts.''')
parser.add_argument(
'-v', '--verbose', action='store_true',
help='show verbose information',
)
parser.add_argument(
'-o', '--overwrite', action='store_true', dest='overwritefile',
help='overwrite destination file',
)
parser.add_argument(
'-R', '--range', dest='rng', metavar='RANGE',
help='''select a RANGE of rows (in the form "start,stop,step")
during the copy of *all* the leaves.
Default values are "None,None,1", which means a copy of all the
rows.''',
)
parser.add_argument(
'--non-recursive', action='store_false', default=True,
dest='recursive',
help='do not do a recursive copy. Default is to do it',
)
parser.add_argument(
'--dest-title', dest='title', default='',
help='title for the new file (if not specified, the source is copied)',
)
parser.add_argument(
'--dont-create-sysattrs', action='store_false', default=True,
dest='createsysattrs',
help='do not create sys attrs (default is to do it)',
)
parser.add_argument(
'--dont-copy-userattrs', action='store_false', default=True,
dest='copyuserattrs',
help='do not copy the user attrs (default is to do it)',
)
parser.add_argument(
'--overwrite-nodes', action='store_true', dest='overwrtnodes',
help='''overwrite destination nodes if they exist.
Default is to not overwrite them''',
)
parser.add_argument(
'--complevel', type=int, default=0,
help='''set a compression level (0 for no compression, which is the
default)''',
)
parser.add_argument(
'--complib', choices=(
"zlib", "lzo", "bzip2", "blosc", "blosc:blosclz",
"blosc:lz4", "blosc:lz4hc", "blosc:snappy",
"blosc:zlib", "blosc:zstd"), default='zlib',
help='''set the compression library to be used during the copy.
Defaults to %(default)s''',
)
parser.add_argument(
'--shuffle', type=int, choices=(0, 1),
help='''activate or not the shuffle filter (default is active if
complevel > 0)''',
)
parser.add_argument(
'--bitshuffle', type=int, choices=(0, 1),
help='''activate or not the bitshuffle filter (not active by default)''',
)
parser.add_argument(
'--fletcher32', type=int, choices=(0, 1),
help='''whether to activate or not the fletcher32 filter (not active
by default)''',
)
parser.add_argument(
'--keep-source-filters', action='store_true', dest='keepfilters',
help='''use the original filters in source files.
The default is not doing that if any of --complevel, --complib,
--shuffle --bitshuffle or --fletcher32 option is specified''',
)
parser.add_argument(
'--chunkshape', default='keep',
help='''set a chunkshape.
Possible options are: "keep" | "auto" | int | tuple.
A value of "auto" computes a sensible value for the chunkshape of the
leaves copied. The default is to "keep" the original value''',
)
parser.add_argument(
'--upgrade-flavors', action='store_true', dest='upgradeflavors',
        help='''when repacking PyTables 1.x or PyTables 2.x files, the flavor
        of leaves will be unset. With this, such leaves will be serialized
        as objects with the internal flavor ('numpy' for the 3.x series)''',
)
parser.add_argument(
'--dont-regenerate-old-indexes', action='store_false', default=True,
dest='regoldindexes',
help='''disable regenerating old indexes.
The default is to regenerate old indexes as they are found''',
)
parser.add_argument(
'--sortby', metavar='COLUMN',
help='''do a table copy sorted by the index in "column".
For reversing the order, use a negative value in the "step" part of
"RANGE" (see "-r" flag). Only applies to table objects''',
)
parser.add_argument(
'--checkCSI', action='store_true',
help='Force the check for a CSI index for the --sortby column',
)
parser.add_argument(
'--propindexes', action='store_true',
help='''propagate the indexes existing in original tables. The default
is to not propagate them. Only applies to table objects''',
)
parser.add_argument(
'src', metavar='sourcefile:sourcegroup', help='source file/group',
)
parser.add_argument(
'dst', metavar='destfile:destgroup', help='destination file/group',
)
return parser
def main():
global verbose
global regoldindexes
global createsysattrs
parser = _get_parser()
args = parser.parse_args()
# check arguments
if args.rng:
try:
args.rng = eval("slice(" + args.rng + ")")
except Exception:
parser.error("Error when getting the range parameter.")
if args.chunkshape.isdigit() or args.chunkshape.startswith('('):
args.chunkshape = eval(args.chunkshape)
if args.complevel < 0 or args.complevel > 9:
parser.error(
'invalid "complevel" value, it sould be in te range [0, 9]'
)
# Catch the files passed as the last arguments
src = args.src.rsplit(':', 1)
dst = args.dst.rsplit(':', 1)
if len(src) == 1:
srcfile, srcnode = src[0], "/"
else:
srcfile, srcnode = src
if len(dst) == 1:
dstfile, dstnode = dst[0], "/"
else:
dstfile, dstnode = dst
if srcnode == "":
# case where filename == "filename:" instead of "filename:/"
srcnode = "/"
if dstnode == "":
# case where filename == "filename:" instead of "filename:/"
dstnode = "/"
# Ignore the warnings for tables that contains oldindexes
# (these will be handled by the copying routines)
warnings.filterwarnings("ignore", category=OldIndexWarning)
# Ignore the flavors warnings during upgrading flavor operations
if args.upgradeflavors:
warnings.filterwarnings("ignore", category=FlavorWarning)
# Build the Filters instance
filter_params = (
args.complevel,
args.complib,
args.shuffle,
args.bitshuffle,
args.fletcher32,
)
    if (filter_params == (None,) * 5 or args.keepfilters):
filters = None
else:
if args.complevel is None:
args.complevel = 0
if args.shuffle is None:
if args.complevel > 0:
args.shuffle = True
else:
args.shuffle = False
if args.bitshuffle is None:
args.bitshuffle = False
if args.bitshuffle:
# Shuffle and bitshuffle are mutually exclusive
args.shuffle = False
if args.complib is None:
args.complib = "zlib"
if args.fletcher32 is None:
args.fletcher32 = False
filters = Filters(complevel=args.complevel, complib=args.complib,
shuffle=args.shuffle, bitshuffle=args.bitshuffle,
fletcher32=args.fletcher32)
# The start, stop and step params:
start, stop, step = None, None, 1 # Defaults
if args.rng:
start, stop, step = args.rng.start, args.rng.stop, args.rng.step
# Set globals
verbose = args.verbose
regoldindexes = args.regoldindexes
createsysattrs = args.createsysattrs
# Some timing
t1 = time.time()
cpu1 = cputime()
# Copy the file
if verbose:
print("+=+" * 20)
print("Recursive copy:", args.recursive)
print("Applying filters:", filters)
if args.sortby is not None:
print("Sorting table(s) by column:", args.sortby)
print("Forcing a CSI creation:", args.checkCSI)
if args.propindexes:
print("Recreating indexes in copied table(s)")
print("Start copying %s:%s to %s:%s" % (srcfile, srcnode,
dstfile, dstnode))
print("+=+" * 20)
# Check whether the specified source node is a group or a leaf
h5srcfile = open_file(srcfile, 'r')
srcnodeobject = h5srcfile.get_node(srcnode)
# Close the file again
h5srcfile.close()
stats = {'groups': 0, 'leaves': 0, 'links': 0, 'bytes': 0, 'hardlinks': 0}
if isinstance(srcnodeobject, Group):
copy_children(
srcfile, dstfile, srcnode, dstnode,
title=args.title, recursive=args.recursive, filters=filters,
copyuserattrs=args.copyuserattrs, overwritefile=args.overwritefile,
overwrtnodes=args.overwrtnodes, stats=stats,
start=start, stop=stop, step=step, chunkshape=args.chunkshape,
sortby=args.sortby, check_CSI=args.checkCSI,
propindexes=args.propindexes,
upgradeflavors=args.upgradeflavors,
use_hardlinks=True)
else:
# If not a Group, it should be a Leaf
copy_leaf(
srcfile, dstfile, srcnode, dstnode,
title=args.title, filters=filters,
copyuserattrs=args.copyuserattrs,
overwritefile=args.overwritefile, overwrtnodes=args.overwrtnodes,
stats=stats, start=start, stop=stop, step=step,
chunkshape=args.chunkshape,
sortby=args.sortby, check_CSI=args.checkCSI,
propindexes=args.propindexes,
upgradeflavors=args.upgradeflavors)
# Gather some statistics
t2 = time.time()
    cpu2 = cputime()
tcopy = round(t2 - t1, 3)
cpucopy = round(cpu2 - cpu1, 3)
try:
tpercent = int(round(cpucopy / tcopy, 2) * 100)
except ZeroDivisionError:
tpercent = 'NaN'
if verbose:
ngroups = stats['groups']
nleaves = stats['leaves']
nlinks = stats['links']
nhardlinks = stats['hardlinks']
nbytescopied = stats['bytes']
nnodes = ngroups + nleaves + nlinks + nhardlinks
print(
"Groups copied:", ngroups,
", Leaves copied:", nleaves,
", Links copied:", nlinks,
", Hard links copied:", nhardlinks,
)
if args.copyuserattrs:
print("User attrs copied")
else:
print("User attrs not copied")
print("KBytes copied:", round(nbytescopied / 1024., 3))
print("Time copying: %s s (real) %s s (cpu) %s%%" % (
tcopy, cpucopy, tpercent))
print("Copied nodes/sec: ", round((nnodes) / float(tcopy), 1))
print("Copied KB/s :", int(nbytescopied / (tcopy * 1024)))
| 38.918685
| 81
| 0.593776
|
7953198a4dad26e4354a8cc05e281d355e69422b
| 936
|
py
|
Python
|
metroid/migrations/0001_initial.py
|
Bryhn-Bjolgerud/metroid
|
bb29a9cbd240b7c78643b74b017040079d381e24
|
[
"MIT"
] | 23
|
2021-04-06T13:03:09.000Z
|
2022-03-11T13:47:54.000Z
|
metroid/migrations/0001_initial.py
|
Bryhn-Bjolgerud/metroid
|
bb29a9cbd240b7c78643b74b017040079d381e24
|
[
"MIT"
] | 4
|
2021-07-13T10:17:38.000Z
|
2022-02-02T15:01:59.000Z
|
metroid/migrations/0001_initial.py
|
Bryhn-Bjolgerud/metroid
|
bb29a9cbd240b7c78643b74b017040079d381e24
|
[
"MIT"
] | 2
|
2021-10-04T07:48:47.000Z
|
2022-02-02T13:29:06.000Z
|
# Generated by Django 3.1.5 on 2021-02-01 18:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name='FailedMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('topic_name', models.CharField(max_length=255)),
('subscription_name', models.CharField(max_length=255)),
('subject', models.CharField(max_length=255)),
('message', models.JSONField()),
('exception_str', models.TextField()),
('traceback', models.TextField()),
('correlation_id', models.CharField(blank=True, max_length=36)),
],
),
]
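# For reference, a model definition consistent with the migration above could
# look like the following; this is a hedged reconstruction, not necessarily
# the project's actual models.py:
from django.db import models
class FailedMessage(models.Model):
    created = models.DateTimeField(auto_now_add=True)
    topic_name = models.CharField(max_length=255)
    subscription_name = models.CharField(max_length=255)
    subject = models.CharField(max_length=255)
    message = models.JSONField()
    exception_str = models.TextField()
    traceback = models.TextField()
    correlation_id = models.CharField(blank=True, max_length=36)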
| 33.428571
| 114
| 0.574786
|
79531aacdbb392d1efbc936d67f18d59e78771e7
| 1,645
|
py
|
Python
|
test/rules/functions/test_find_in_map.py
|
j0lly/cfn-python-lint
|
3032bab8fe190763bd0df1c34905c3528ceb411f
|
[
"MIT-0"
] | 1
|
2019-03-19T22:49:38.000Z
|
2019-03-19T22:49:38.000Z
|
test/rules/functions/test_find_in_map.py
|
j0lly/cfn-python-lint
|
3032bab8fe190763bd0df1c34905c3528ceb411f
|
[
"MIT-0"
] | null | null | null |
test/rules/functions/test_find_in_map.py
|
j0lly/cfn-python-lint
|
3032bab8fe190763bd0df1c34905c3528ceb411f
|
[
"MIT-0"
] | 1
|
2020-05-04T16:32:19.000Z
|
2020-05-04T16:32:19.000Z
|
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.functions.FindInMap import FindInMap # pylint: disable=E0401
from .. import BaseRuleTestCase
class TestRulesFindInMap(BaseRuleTestCase):
"""Test Rules Get Att """
def setUp(self):
"""Setup"""
super(TestRulesFindInMap, self).setUp()
self.collection.register(FindInMap())
self.success_templates = [
'fixtures/templates/good/functions_findinmap.yaml',
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('fixtures/templates/bad/functions_findinmap.yaml', 5)
| 43.289474
| 87
| 0.731307
|
79531afeca35fa04b4b0e9adef01dced320a614c
| 1,237
|
py
|
Python
|
tests/unit/test_count_reads.py
|
mlin/idseq-dag
|
b474fd434b95a3a65b53c7d9f875449ca3f42172
|
[
"MIT"
] | null | null | null |
tests/unit/test_count_reads.py
|
mlin/idseq-dag
|
b474fd434b95a3a65b53c7d9f875449ca3f42172
|
[
"MIT"
] | null | null | null |
tests/unit/test_count_reads.py
|
mlin/idseq-dag
|
b474fd434b95a3a65b53c7d9f875449ca3f42172
|
[
"MIT"
] | null | null | null |
import os
import sys
import tempfile
import unittest
from idseq_dag.util.count import count_reads
from idseq_dag.exceptions import InvalidInputFileError
class TestCountReads(unittest.TestCase):
def test_count_reads(self):
expect_reads = {
os.path.join(os.path.dirname(__file__), "fixtures", "reads.fasta"): 402,
os.path.join(os.path.dirname(__file__), "fixtures", "reads.fasta.gz"): 402,
os.path.join(os.path.dirname(__file__), "fixtures", "reads.fastq"): 100,
os.path.join(os.path.dirname(__file__), "fixtures", "reads.fastq.gz"): 100
}
for filename in expect_reads:
self.assertEqual(count_reads(filename), expect_reads[filename])
with tempfile.NamedTemporaryFile() as tf:
tf.write(b"test")
tf.flush()
with self.assertRaises(InvalidInputFileError):
count_reads(tf.name)
with tempfile.NamedTemporaryFile() as tf:
with open(os.path.join(os.path.dirname(__file__), "fixtures", "reads.fastq"), "rb") as fh:
tf.write(fh.read(90))
tf.flush()
with self.assertRaises(InvalidInputFileError):
count_reads(tf.name)
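# A minimal hedged usage sketch of the helper exercised above: count_reads()
# takes a FASTA/FASTQ path (optionally gzipped) and returns the number of
# reads, raising InvalidInputFileError on malformed input.  The path below is
# relative to the repository root and matches the fixture used in the test:
from idseq_dag.util.count import count_reads
print(count_reads("tests/unit/fixtures/reads.fastq"))  # expected: 100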
| 38.65625
| 102
| 0.632983
|
79531bdb2b60db1099d13fe57cde9dd4de12d9ce
| 15,543
|
py
|
Python
|
manimlib/mobject/svg/svg_mobject.py
|
sanjaydatasciencedojo/manim
|
603a1a21dbb5eca325ed670f46ea72401a8edf1d
|
[
"MIT"
] | null | null | null |
manimlib/mobject/svg/svg_mobject.py
|
sanjaydatasciencedojo/manim
|
603a1a21dbb5eca325ed670f46ea72401a8edf1d
|
[
"MIT"
] | null | null | null |
manimlib/mobject/svg/svg_mobject.py
|
sanjaydatasciencedojo/manim
|
603a1a21dbb5eca325ed670f46ea72401a8edf1d
|
[
"MIT"
] | null | null | null |
import itertools as it
import re
import string
import warnings
from xml.dom import minidom
from manimlib.constants import *
from manimlib.mobject.geometry import Circle, Rectangle, RoundedRectangle
from manimlib.mobject.types.vectorized_mobject import VGroup, VMobject
from manimlib.utils.color import *
from manimlib.utils.config_ops import digest_config, digest_locals
def string_to_numbers(num_string):
num_string = num_string.replace("-", ",-")
num_string = num_string.replace("e,-", "e-")
return [
float(s)
for s in re.split("[ ,]", num_string)
if s != ""
]
class SVGMobject(VMobject):
CONFIG = {
"should_center": True,
"height": 2,
"width": None,
# Must be filled in in a subclass, or when called
"file_name": None,
"unpack_groups": True, # if False, creates a hierarchy of VGroups
"stroke_width": DEFAULT_STROKE_WIDTH,
"fill_opacity": 1.0,
# "fill_color" : LIGHT_GREY,
}
def __init__(self, file_name=None, **kwargs):
digest_config(self, kwargs)
self.file_name = file_name or self.file_name
self.ensure_valid_file()
VMobject.__init__(self, **kwargs)
self.move_into_position()
def ensure_valid_file(self):
if self.file_name is None:
raise Exception("Must specify file for SVGMobject")
possible_paths = [
os.path.join(os.path.join("assets", "svg_images"), self.file_name),
os.path.join(os.path.join("assets", "svg_images"), self.file_name + ".svg"),
os.path.join(os.path.join("assets", "svg_images"), self.file_name + ".xdv"),
self.file_name,
]
for path in possible_paths:
if os.path.exists(path):
self.file_path = path
return
raise IOError("No file matching %s in image directory" %
self.file_name)
def generate_points(self):
doc = minidom.parse(self.file_path)
self.ref_to_element = {}
for svg in doc.getElementsByTagName("svg"):
mobjects = self.get_mobjects_from(svg)
if self.unpack_groups:
self.add(*mobjects)
else:
self.add(*mobjects[0].submobjects)
doc.unlink()
def get_mobjects_from(self, element):
result = []
if not isinstance(element, minidom.Element):
return result
if element.tagName == 'defs':
self.update_ref_to_element(element)
elif element.tagName == 'style':
pass # TODO, handle style
elif element.tagName in ['g', 'svg', 'symbol']:
result += it.chain(*[
self.get_mobjects_from(child)
for child in element.childNodes
])
elif element.tagName == 'path':
temp = element.getAttribute('d')
if temp != '':
result.append(self.path_string_to_mobject(temp))
elif element.tagName == 'use':
result += self.use_to_mobjects(element)
elif element.tagName == 'rect':
result.append(self.rect_to_mobject(element))
elif element.tagName == 'circle':
result.append(self.circle_to_mobject(element))
elif element.tagName == 'ellipse':
result.append(self.ellipse_to_mobject(element))
elif element.tagName in ['polygon', 'polyline']:
result.append(self.polygon_to_mobject(element))
else:
pass # TODO
# warnings.warn("Unknown element type: " + element.tagName)
result = [m for m in result if m is not None]
self.handle_transforms(element, VGroup(*result))
if len(result) > 1 and not self.unpack_groups:
result = [VGroup(*result)]
return result
def g_to_mobjects(self, g_element):
mob = VGroup(*self.get_mobjects_from(g_element))
self.handle_transforms(g_element, mob)
return mob.submobjects
def path_string_to_mobject(self, path_string):
return VMobjectFromSVGPathstring(path_string)
def use_to_mobjects(self, use_element):
# Remove initial "#" character
ref = use_element.getAttribute("xlink:href")[1:]
if ref not in self.ref_to_element:
warnings.warn("%s not recognized" % ref)
return VGroup()
return self.get_mobjects_from(
self.ref_to_element[ref]
)
def attribute_to_float(self, attr):
stripped_attr = "".join([
char for char in attr
if char in string.digits + "." + "-"
])
return float(stripped_attr)
def polygon_to_mobject(self, polygon_element):
# TODO, This seems hacky...
path_string = polygon_element.getAttribute("points")
for digit in string.digits:
path_string = path_string.replace(" " + digit, " L" + digit)
path_string = "M" + path_string
return self.path_string_to_mobject(path_string)
# <circle class="st1" cx="143.8" cy="268" r="22.6"/>
def circle_to_mobject(self, circle_element):
x, y, r = [
self.attribute_to_float(
circle_element.getAttribute(key)
)
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "r")
]
return Circle(radius=r).shift(x * RIGHT + y * DOWN)
def ellipse_to_mobject(self, circle_element):
x, y, rx, ry = [
self.attribute_to_float(
circle_element.getAttribute(key)
)
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "rx", "ry")
]
return Circle().scale(rx * RIGHT + ry * UP).shift(x * RIGHT + y * DOWN)
def rect_to_mobject(self, rect_element):
fill_color = rect_element.getAttribute("fill")
stroke_color = rect_element.getAttribute("stroke")
stroke_width = rect_element.getAttribute("stroke-width")
corner_radius = rect_element.getAttribute("rx")
        # input preprocessing
        opacity = 1  # default fill opacity; set to 0 below when the fill is empty or white
        if fill_color in ["", "none", "#FFF", "#FFFFFF"] or Color(fill_color) == Color(WHITE):
            opacity = 0
            fill_color = BLACK  # shouldn't be necessary, but avoids error messages
if fill_color in ["#000", "#000000"]:
fill_color = WHITE
if stroke_color in ["", "none", "#FFF", "#FFFFFF"] or Color(stroke_color) == Color(WHITE):
stroke_width = 0
stroke_color = BLACK
if stroke_color in ["#000", "#000000"]:
stroke_color = WHITE
if stroke_width in ["", "none", "0"]:
stroke_width = 0
if corner_radius in ["", "0", "none"]:
corner_radius = 0
corner_radius = float(corner_radius)
if corner_radius == 0:
mob = Rectangle(
width=self.attribute_to_float(
rect_element.getAttribute("width")
),
height=self.attribute_to_float(
rect_element.getAttribute("height")
),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity
)
else:
mob = RoundedRectangle(
width=self.attribute_to_float(
rect_element.getAttribute("width")
),
height=self.attribute_to_float(
rect_element.getAttribute("height")
),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity,
corner_radius=corner_radius
)
mob.shift(mob.get_center() - mob.get_corner(UP + LEFT))
return mob
def handle_transforms(self, element, mobject):
x, y = 0, 0
try:
x = self.attribute_to_float(element.getAttribute('x'))
# Flip y
y = -self.attribute_to_float(element.getAttribute('y'))
mobject.shift(x * RIGHT + y * UP)
except:
pass
transform = element.getAttribute('transform')
try: # transform matrix
prefix = "matrix("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
transform = string_to_numbers(transform)
transform = np.array(transform).reshape([3, 2])
x = transform[2][0]
y = -transform[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
mob.points = np.dot(mob.points, matrix)
mobject.shift(x * RIGHT + y * UP)
except:
pass
try: # transform scale
prefix = "scale("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
scale_values = string_to_numbers(transform)
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
except:
pass
try: # transform translate
prefix = "translate("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
x, y = string_to_numbers(transform)
mobject.shift(x * RIGHT + y * DOWN)
except:
pass
# TODO, ...
def flatten(self, input_list):
output_list = []
for i in input_list:
if isinstance(i, list):
output_list.extend(self.flatten(i))
else:
output_list.append(i)
return output_list
def get_all_childNodes_have_id(self, element):
all_childNodes_have_id = []
if not isinstance(element, minidom.Element):
return
if element.hasAttribute('id'):
return [element]
for e in element.childNodes:
all_childNodes_have_id.append(self.get_all_childNodes_have_id(e))
return self.flatten([e for e in all_childNodes_have_id if e])
def update_ref_to_element(self, defs):
new_refs = dict([(e.getAttribute('id'), e) for e in self.get_all_childNodes_have_id(defs)])
self.ref_to_element.update(new_refs)
def move_into_position(self):
if self.should_center:
self.center()
if self.height is not None:
self.set_height(self.height)
if self.width is not None:
self.set_width(self.width)
class VMobjectFromSVGPathstring(VMobject):
def __init__(self, path_string, **kwargs):
digest_locals(self)
VMobject.__init__(self, **kwargs)
def get_path_commands(self):
result = [
"M", # moveto
"L", # lineto
"H", # horizontal lineto
"V", # vertical lineto
"C", # curveto
"S", # smooth curveto
"Q", # quadratic Bezier curve
"T", # smooth quadratic Bezier curveto
"A", # elliptical Arc
"Z", # closepath
]
result += [s.lower() for s in result]
return result
def generate_points(self):
pattern = "[%s]" % ("".join(self.get_path_commands()))
pairs = list(zip(
re.findall(pattern, self.path_string),
re.split(pattern, self.path_string)[1:]
))
        # Which mobject should new points be added to
        # (the assignment below is a harmless no-op; every command acts on self)
        self = self
for command, coord_string in pairs:
self.handle_command(command, coord_string)
# people treat y-coordinate differently
self.rotate(np.pi, RIGHT, about_point=ORIGIN)
def handle_command(self, command, coord_string):
isLower = command.islower()
command = command.upper()
# new_points are the points that will be added to the curr_points
# list. This variable may get modified in the conditionals below.
points = self.points
new_points = self.string_to_points(coord_string)
if isLower and len(points) > 0:
new_points += points[-1]
if command == "M": # moveto
self.start_new_path(new_points[0])
if len(new_points) <= 1:
return
# Draw relative line-to values.
points = self.points
new_points = new_points[1:]
command = "L"
for p in new_points:
if isLower:
# Treat everything as relative line-to until empty
p[0] += self.points[-1, 0]
p[1] += self.points[-1, 1]
self.add_line_to(p)
return
elif command in ["L", "H", "V"]: # lineto
if command == "H":
new_points[0, 1] = points[-1, 1]
elif command == "V":
if isLower:
new_points[0, 0] -= points[-1, 0]
new_points[0, 0] += points[-1, 1]
new_points[0, 1] = new_points[0, 0]
new_points[0, 0] = points[-1, 0]
self.add_line_to(new_points[0])
return
if command == "C": # curveto
pass # Yay! No action required
elif command in ["S", "T"]: # smooth curveto
self.add_smooth_curve_to(*new_points)
# handle1 = points[-1] + (points[-1] - points[-2])
# new_points = np.append([handle1], new_points, axis=0)
return
elif command == "Q": # quadratic Bezier curve
# TODO, this is a suboptimal approximation
new_points = np.append([new_points[0]], new_points, axis=0)
elif command == "A": # elliptical Arc
raise Exception("Not implemented")
elif command == "Z": # closepath
return
# Add first three points
self.add_cubic_bezier_curve_to(*new_points[0:3])
# Handle situations where there's multiple relative control points
if len(new_points) > 3:
# Add subsequent offset points relatively.
for i in range(3, len(new_points), 3):
if isLower:
new_points[i:i + 3] -= points[-1]
new_points[i:i + 3] += new_points[i - 1]
self.add_cubic_bezier_curve_to(*new_points[i:i+3])
def string_to_points(self, coord_string):
numbers = string_to_numbers(coord_string)
if len(numbers) % 2 == 1:
numbers.append(0)
num_points = len(numbers) // 2
result = np.zeros((num_points, self.dim))
result[:, :2] = np.array(numbers).reshape((num_points, 2))
return result
def get_original_path_string(self):
return self.path_string
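# --- Editor's usage sketch (not part of the original file) ---------------------
# Assuming a working manimlib installation and an SVG file (name illustrative)
# reachable through the lookup order in ensure_valid_file above, either under
# assets/svg_images/ or via a direct path:
#
#     from manimlib.scene.scene import Scene
#
#     class ShowLogo(Scene):
#         def construct(self):
#             logo = SVGMobject("logo", height=3)   # ".svg" is tried as a suffix
#             self.add(logo)
# --------------------------------------------------------------------------------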
| 36.400468
| 99
| 0.559802
|
79531c638474a32e2f3582865ee7ae37603ebe9d
| 9,424
|
py
|
Python
|
chapter13-mi-unsupervised/iic-13.5.1.py
|
gabrielmahia/obamAI
|
ba45f0a6efae793d7f5e356a1dbf5c6835a65dba
|
[
"MIT"
] | 1,291
|
2018-03-30T07:42:07.000Z
|
2022-03-31T20:27:55.000Z
|
chapter13-mi-unsupervised/iic-13.5.1.py
|
gabrielmahia/obamAI
|
ba45f0a6efae793d7f5e356a1dbf5c6835a65dba
|
[
"MIT"
] | 18
|
2019-01-01T16:50:28.000Z
|
2022-03-31T17:58:31.000Z
|
chapter13-mi-unsupervised/iic-13.5.1.py
|
gabrielmahia/obamAI
|
ba45f0a6efae793d7f5e356a1dbf5c6835a65dba
|
[
"MIT"
] | 798
|
2018-03-26T01:01:47.000Z
|
2022-03-31T06:33:07.000Z
|
"""Build, train and evaluate an IIC Model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K
from tensorflow.keras.datasets import mnist
import numpy as np
import os
import argparse
import vgg
from data_generator import DataGenerator
from utils import unsupervised_labels, center_crop
from utils import AccuracyCallback, lr_schedule
class IIC:
def __init__(self,
args,
backbone):
"""Contains the encoder model, the loss function,
loading of datasets, train and evaluation routines
to implement IIC unsupervised clustering via mutual
information maximization
Arguments:
args : Command line arguments to indicate choice
of batch size, number of heads, folder to save
weights file, weights file name, etc
backbone (Model): IIC Encoder backbone (eg VGG)
"""
self.args = args
self.backbone = backbone
self._model = None
self.train_gen = DataGenerator(args, siamese=True)
self.n_labels = self.train_gen.n_labels
self.build_model()
self.load_eval_dataset()
self.accuracy = 0
def build_model(self):
"""Build the n_heads of the IIC model
"""
inputs = Input(shape=self.train_gen.input_shape, name='x')
x = self.backbone(inputs)
x = Flatten()(x)
# number of output heads
outputs = []
for i in range(self.args.heads):
name = "z_head%d" % i
outputs.append(Dense(self.n_labels,
activation='softmax',
name=name)(x))
self._model = Model(inputs, outputs, name='encoder')
optimizer = Adam(lr=1e-3)
self._model.compile(optimizer=optimizer, loss=self.mi_loss)
self._model.summary()
def mi_loss(self, y_true, y_pred):
"""Mutual information loss computed from the joint
distribution matrix and the marginals
Arguments:
y_true (tensor): Not used since this is
unsupervised learning
y_pred (tensor): stack of softmax predictions for
the Siamese latent vectors (Z and Zbar)
"""
size = self.args.batch_size
n_labels = y_pred.shape[-1]
# lower half is Z
Z = y_pred[0: size, :]
Z = K.expand_dims(Z, axis=2)
# upper half is Zbar
Zbar = y_pred[size: y_pred.shape[0], :]
Zbar = K.expand_dims(Zbar, axis=1)
# compute joint distribution (Eq 10.3.2 & .3)
P = K.batch_dot(Z, Zbar)
P = K.sum(P, axis=0)
# enforce symmetric joint distribution (Eq 10.3.4)
P = (P + K.transpose(P)) / 2.0
# normalization of total probability to 1.0
P = P / K.sum(P)
# marginal distributions (Eq 10.3.5 & .6)
Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
P = K.clip(P, K.epsilon(), np.finfo(float).max)
Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
# negative MI loss (Eq 10.3.7)
neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
        # each head contributes 1/n_heads to the total loss
return neg_mi/self.args.heads
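    # --- Editor's sketch (not part of the original file) -----------------------
    # The same computation in plain NumPy, for a hypothetical batch of N softmax
    # rows z and zbar, each of width C:
    #
    #     P = (z[:, :, None] * zbar[:, None, :]).sum(axis=0)   # C x C joint
    #     P = (P + P.T) / 2.0                                   # symmetrise
    #     P = P / P.sum()                                       # total prob. = 1
    #     Pi = P.sum(axis=1, keepdims=True)                     # row marginal
    #     Pj = P.sum(axis=0, keepdims=True)                     # column marginal
    #     mi = (P * (np.log(P) - np.log(Pi) - np.log(Pj))).sum()
    #
    # mi_loss above returns the negative of this value, scaled by 1/heads, so
    # minimising the loss maximises the mutual information between Z and Zbar.
    # ----------------------------------------------------------------------------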
def train(self):
"""Train function uses the data generator,
accuracy computation, and learning rate
scheduler callbacks
"""
accuracy = AccuracyCallback(self)
lr_scheduler = LearningRateScheduler(lr_schedule,
verbose=1)
callbacks = [accuracy, lr_scheduler]
self._model.fit(x=self.train_gen,
use_multiprocessing=False,
epochs=self.args.epochs,
callbacks=callbacks,
shuffle=True)
def load_eval_dataset(self):
"""Pre-load test data for evaluation
"""
(_, _), (x_test, self.y_test) = self.args.dataset.load_data()
image_size = x_test.shape[1]
x_test = np.reshape(x_test,[-1, image_size, image_size, 1])
x_test = x_test.astype('float32') / 255
x_eval = np.zeros([x_test.shape[0], *self.train_gen.input_shape])
for i in range(x_eval.shape[0]):
x_eval[i] = center_crop(x_test[i])
self.x_test = x_eval
def load_weights(self):
"""Reload model weights for evaluation
"""
if self.args.restore_weights is None:
raise ValueError("Must load model weights for evaluation")
if self.args.restore_weights:
folder = "weights"
os.makedirs(folder, exist_ok=True)
path = os.path.join(folder, self.args.restore_weights)
print("Loading weights... ", path)
self._model.load_weights(path)
def eval(self):
"""Evaluate the accuracy of the current model weights
"""
y_pred = self._model.predict(self.x_test)
print("")
# accuracy per head
for head in range(self.args.heads):
if self.args.heads == 1:
y_head = y_pred
else:
y_head = y_pred[head]
y_head = np.argmax(y_head, axis=1)
accuracy = unsupervised_labels(list(self.y_test),
list(y_head),
self.n_labels,
self.n_labels)
info = "Head %d accuracy: %0.2f%%"
if self.accuracy > 0:
info += ", Old best accuracy: %0.2f%%"
data = (head, accuracy, self.accuracy)
else:
data = (head, accuracy)
print(info % data)
# if accuracy improves during training,
# save the model weights on a file
if accuracy > self.accuracy \
and self.args.save_weights is not None:
self.accuracy = accuracy
folder = self.args.save_dir
os.makedirs(folder, exist_ok=True)
path = os.path.join(folder, self.args.save_weights)
print("Saving weights... ", path)
self._model.save_weights(path)
@property
def model(self):
return self._model
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='IIC Keras')
parser.add_argument('--save-dir',
default="weights",
help='Folder for storing model weights (h5)')
parser.add_argument('--save-weights',
default=None,
                        help='File name for saving model weights (h5)')
parser.add_argument('--dataset',
default=mnist,
help='Dataset to use')
parser.add_argument('--epochs',
type=int,
default=1200,
metavar='N',
help='Number of epochs to train')
parser.add_argument('--batch-size',
type=int,
default=512,
metavar='N',
help='Train batch size')
parser.add_argument('--heads',
type=int,
default=1,
metavar='N',
help='Number of heads')
parser.add_argument('--train',
default=False,
action='store_true',
help='Train the model')
parser.add_argument('--restore-weights',
default=None,
help='Restore saved model weights')
parser.add_argument('--eval',
default=False,
action='store_true',
                        help='Evaluate a pre-trained model. Must indicate weights file.')
parser.add_argument('--crop',
type=int,
default=4,
help='Pixels to crop from the image')
parser.add_argument('--plot-model',
default=False,
action='store_true',
help='Plot all network models')
args = parser.parse_args()
# build backbone
backbone = vgg.VGG(vgg.cfg['F'])
backbone.model.summary()
# instantiate IIC object
iic = IIC(args, backbone.model)
if args.plot_model:
plot_model(backbone.model,
to_file="model-vgg.png",
show_shapes=True)
plot_model(iic.model,
to_file="model-iic.png",
show_shapes=True)
if args.eval:
iic.load_weights()
iic.eval()
elif args.train:
iic.train()
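# --- Editor's usage sketch (not part of the original file) ---------------------
# Typical invocations, assuming vgg.py, data_generator.py and utils.py from the
# same chapter are importable; the weights file name is illustrative:
#
#     python iic-13.5.1.py --train --heads=2 --save-weights=iic_mnist.h5
#     python iic-13.5.1.py --eval --heads=2 --restore-weights=iic_mnist.h5
# --------------------------------------------------------------------------------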
| 36.246154
| 89
| 0.541384
|
79531c7a61143f17f43c5ca3d99d875d53e6ee91
| 4,093
|
py
|
Python
|
app.py
|
matcha1024/flask-blog
|
a5594f3d579e16f324173fafc0f9fa9b2a96f3a4
|
[
"MIT"
] | null | null | null |
app.py
|
matcha1024/flask-blog
|
a5594f3d579e16f324173fafc0f9fa9b2a96f3a4
|
[
"MIT"
] | null | null | null |
app.py
|
matcha1024/flask-blog
|
a5594f3d579e16f324173fafc0f9fa9b2a96f3a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Flask, request, render_template, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import os
import markdown
md = markdown.Markdown()
# My global IP address (used by the admin checks below) -> https://www.cman.jp/network/support/go_access.cgi
MY_IP = "123.45.67.89"
app = Flask(__name__)
db_uri = os.environ.get('DATABASE_URL')
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
db = SQLAlchemy(app)
class Article(db.Model):
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime, nullable=False, default=datetime.now)
article_title = db.Column(db.String(255))
article = db.Column(db.Text())
pv = db.Column(db.Integer)
tag = db.Column(db.String(80))
def __init__(self, created_at, article_title, article, pv, tag):
self.created_at = created_at
self.article_title = article_title
self.article = article
self.pv = pv
self.tag = tag
@app.route('/')
def index():
global MY_IP
articles = Article.query.order_by(Article.id).all()
articles.reverse()
pv_articles = Article.query.order_by(Article.pv).all()
pv_articles.reverse()
back_p = True
next_p = True
page = 1
if(request.args.get('p') is not None):
page = int(request.args.get('p'))
if(page == 1):
back_p = False
if(5*page >= len(articles)):
next_p = False
if(request.environ["HTTP_X_FORWARDED_FOR"] == MY_IP):
return render_template("top.html", admin=True, articles=articles[5*(page-1):5*page], pv_articles=pv_articles[:5], sorted="新着", back_p=back_p, next_p=next_p, now=page)
else:
return render_template("top.html", admin=False, articles=articles[5*(page-1):5*page], pv_articles=pv_articles[:5], sorted="新着", back_p=back_p, next_p=next_p, now=page)
@app.route("/new")
def new():
if(not request.environ["HTTP_X_FORWARDED_FOR"] == MY_IP):
return "ERROR: 権限がありません"
else:
return render_template("new.html")
@app.route("/upload", methods=["POST"])
def upload():
date=datetime.now()
article_title = request.form["title"]
article = request.form["article"]
tag = request.form["tag"]
new_article = Article(created_at=date, article_title=article_title, article=article, pv=0, tag=tag)
db.session.add(new_article)
db.session.commit()
return redirect("/")
@app.route("/archive/<art_id>")
def archive(art_id):
global MY_IP
articles = Article.query.filter_by(id=art_id).first()
md_convert_article = md.convert(articles.article)
articles.pv += 1
db.session.commit()
admin = False
if(request.environ["HTTP_X_FORWARDED_FOR"] == MY_IP):
admin = True
pv_articles = Article.query.order_by(Article.pv).all()
pv_articles.reverse()
return render_template("article.html", articles=articles, md_convert_article=md_convert_article, admin=admin, pv_articles=pv_articles[:5])
@app.route("/tag/<art_tag>")
def tag(art_tag):
articles = Article.query.filter_by(tag=art_tag).all()
articles.reverse()
pv_articles = Article.query.order_by(Article.pv).all()
pv_articles.reverse()
back_p = True
next_p = True
page = 1
if(request.args.get('p') is not None):
page = int(request.args.get('p'))
if(page == 1):
back_p = False
if(5*page >= len(articles)):
next_p = False
return render_template("top.html", admin=False, articles=articles[5*(page-1):5*page], pv_articles=pv_articles[:5], sorted=f"タグ: {art_tag}", back_p=back_p, next_p=next_p, now=page)
@app.route("/edit/<art_id>")
def edit(art_id):
global MY_IP
if(not request.environ["HTTP_X_FORWARDED_FOR"] == MY_IP):
return "ERROR: 権限がありません"
articles = Article.query.filter_by(id=art_id).first()
return render_template("edit.html", articles=articles)
@app.route("/edit_upload", methods=["POST"])
def edit_upload():
art_id = int(request.form["art_id"])
articles = Article.query.filter_by(id=art_id).first()
articles.article_title = request.form["title"]
articles.article = request.form["article"]
articles.tag = request.form["tag"]
db.session.commit()
return redirect("/")
if __name__ == '__main__':
app.run()
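# --- Editor's deployment sketch (not part of the original file) ----------------
# The admin check reads request.environ["HTTP_X_FORWARDED_FOR"], so the app is
# assumed to run behind a reverse proxy that sets that header. The Japanese UI
# strings mean roughly: "権限がありません" = "you do not have permission",
# "新着" = "newest", "タグ" = "tag". A one-off local setup might look like:
#
#     $ export DATABASE_URL=sqlite:///blog.db
#     $ python -c "from app import db; db.create_all()"   # standard Flask-SQLAlchemy
#     $ python app.py
# --------------------------------------------------------------------------------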
| 29.65942
| 181
| 0.697288
|
79531d5c6268d2abe89b46f4c3286a2f15ecb54e
| 1,964
|
py
|
Python
|
setup.py
|
eonu/daze
|
af1a0cc37bf5519bd5f03245e32df9dd2be0c030
|
[
"MIT"
] | null | null | null |
setup.py
|
eonu/daze
|
af1a0cc37bf5519bd5f03245e32df9dd2be0c030
|
[
"MIT"
] | 2
|
2021-01-20T07:17:51.000Z
|
2021-03-01T10:53:36.000Z
|
setup.py
|
eonu/daze
|
af1a0cc37bf5519bd5f03245e32df9dd2be0c030
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from setuptools import setup, find_packages
python_requires = '>=3.5,<3.10'
setup_requires = [
'Cython', # 'Cython>=0.28.5',
'numpy', # 'numpy>=1.17,<2',
'scipy' # 'scipy>=1.3,<2'
]
install_requires = [
'numpy', # 'numpy>=1.17,<2',
# 'scipy', # 'scipy>=1.3,<2',
'scikit-learn', # 'scikit-learn>=0.22,<1',
'matplotlib'
]
VERSION = '0.1.1'
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
setup(
name = 'daze',
version = VERSION,
author = 'Edwin Onuonga',
author_email = 'ed@eonu.net',
description = 'Better multi-class confusion matrix plots for Scikit-Learn, incorporating per-class and overall evaluation measures.',
long_description = long_description,
long_description_content_type = 'text/markdown',
url = 'https://github.com/eonu/daze',
project_urls = {
'Documentation': 'https://daze.readthedocs.io/en/latest',
'Bug Tracker': 'https://github.com/eonu/daze/issues',
'Source Code': 'https://github.com/eonu/daze'
},
license = 'MIT',
package_dir = {'': 'lib'},
packages = find_packages(where='lib'),
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: MacOS',
'Intended Audience :: Science/Research',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Natural Language :: English'
],
python_requires = python_requires,
setup_requires = setup_requires,
install_requires = install_requires
)
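# --- Editor's note (not part of the original file) -----------------------------
# With this setup.py at the repository root, a local install is the usual
#     pip install .
# and the python_requires pin above means the package targets Python 3.5-3.9.
# --------------------------------------------------------------------------------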
| 33.288136
| 137
| 0.613035
|
79531d8a7ac713e2f7045b881bef136edf6cbc4b
| 5,013
|
py
|
Python
|
pytext/models/seq_models/light_conv.py
|
yinghai/pytext
|
5457c157d7a5f39bb96e2f207560cc52d9b98c83
|
[
"BSD-3-Clause"
] | 6,199
|
2018-12-13T15:34:51.000Z
|
2022-03-26T04:08:58.000Z
|
pytext/models/seq_models/light_conv.py
|
yinghai/pytext
|
5457c157d7a5f39bb96e2f207560cc52d9b98c83
|
[
"BSD-3-Clause"
] | 1,356
|
2018-12-13T15:50:33.000Z
|
2022-03-03T20:45:58.000Z
|
pytext/models/seq_models/light_conv.py
|
yinghai/pytext
|
5457c157d7a5f39bb96e2f207560cc52d9b98c83
|
[
"BSD-3-Clause"
] | 842
|
2018-12-13T15:35:13.000Z
|
2022-03-23T13:27:00.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, Optional
import torch.jit
import torch.nn as nn
from pytext.config import ConfigBase
from torch import Tensor
from .base import PyTextIncrementalDecoderComponent
from .utils import unfold1d
class LightweightConv(PyTextIncrementalDecoderComponent):
class Config(ConfigBase):
num_heads: int = 2
weight_softmax: bool = False
bias: bool = True
@classmethod
def from_config(cls, config, input_size, kernel_size, convolution_type):
return cls(input_size, kernel_size, convolution_type, **config._asdict())
def __init__(
self,
input_size,
kernel_size,
# ARBABU TODO : convert this to a enum
convolution_type: str,
num_heads,
weight_softmax,
bias,
):
super().__init__()
self.input_size = input_size
self.kernel_size = kernel_size
if convolution_type == "non-causal":
padding_l = (
kernel_size // 2
if kernel_size % 2 == 1
else ((kernel_size - 1) // 2, kernel_size // 2)
)
elif convolution_type == "causal":
padding_l = kernel_size - 1
else:
raise Exception("Convolution type not supported")
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
self.has_bias = bias
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size).view(1, 1, -1))
else:
self.bias = nn.Parameter(torch.Tensor())
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.has_bias:
nn.init.constant_(self.bias, 0.0)
def forward(self, x, incremental_state: Optional[Dict[str, Tensor]] = None):
"""Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
"""
output = self._forward_unfolded(x, incremental_state)
if self.has_bias:
output = output + self.bias
return output
def _forward_unfolded(
self, x, incremental_state: Optional[Dict[str, Tensor]] = None
):
"""The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right."""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight.view(H, K)
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
else:
# First decoder step
x_unfold = x.unsqueeze(3).clone()
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
else:
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0.0)
x_unfold = x_unfold.view(T * B * H, R, K)
if incremental_state is not None:
weight = weight[:, -(x_unfold.size(2)) :]
K = weight.size(1)
weight = (
weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1)
)
output = torch.bmm(x_unfold, weight) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def reorder_incremental_state(
self, incremental_state: Dict[str, Tensor], new_order: Tensor
):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state: Dict[str, Tensor]):
return self.get_incremental_state(incremental_state, "input_buffer")
def _set_input_buffer(
self, incremental_state: Dict[str, Tensor], new_buffer: Tensor
):
return self.set_incremental_state(incremental_state, "input_buffer", new_buffer)
def extra_repr(self):
s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}".format(
self.input_size,
self.kernel_size,
self.padding_l,
self.num_heads,
self.weight_softmax,
self.has_bias,
)
return s
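# --- Editor's usage sketch (not part of the original file) ---------------------
# A minimal shape check, assuming PyText and its torch dependency are installed;
# the sizes below are illustrative only:
#
#     conv = LightweightConv(
#         input_size=8, kernel_size=3, convolution_type="causal",
#         num_heads=2, weight_softmax=False, bias=True,
#     )
#     x = torch.randn(5, 4, 8)   # T x B x C = timesteps x batch x channels
#     y = conv(x)                # output keeps the same T x B x C shape
# --------------------------------------------------------------------------------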
| 34.572414
| 103
| 0.597048
|
79531e438794c049f1c0530320403de955841e2c
| 5,133
|
py
|
Python
|
wpcv/plp/data/quadnet.py
|
Peiiii/wpcv
|
56ed5327b921c52cd666c76bc204ac9ee5e5d150
|
[
"MIT"
] | null | null | null |
wpcv/plp/data/quadnet.py
|
Peiiii/wpcv
|
56ed5327b921c52cd666c76bc204ac9ee5e5d150
|
[
"MIT"
] | null | null | null |
wpcv/plp/data/quadnet.py
|
Peiiii/wpcv
|
56ed5327b921c52cd666c76bc204ac9ee5e5d150
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import division
import sys
import os
import config as cfg
sys.path.append(os.path.abspath('..'))
import cv2
import random, os, glob, json
from utils.centernet_utils import draw_points_heatmap
from torch.utils.data import dataset
import numpy as np
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from matplotlib import pyplot as plt
from utils.myutils import normalize
import wpcv
from wpcv.utils.augmentations import object_detection as transforms
from wpcv.utils.augmentations import base as BT
from wpcv.utils.transforms import pil as IMG
from torch.utils.data import dataset,dataloader
class Dataset(dataset.Dataset):
def __init__(self, imgs_dir, labels_dir, batch_size=8, classes=['0'], shuffle=True,input_size=cfg.TRAIN_INPUT_SIZE):
self.classes = classes
self.label2id = dict(zip(classes, range(0, len(classes))))
self.imgs_dir = imgs_dir
self.labels_dir = labels_dir
self.input_size=input_size
self.data_pairs = self._load_annotations()
self.shuffle = shuffle
if shuffle:
random.shuffle(self.data_pairs)
self.batch_size = batch_size
self.num_batches = len(self.data_pairs) // batch_size
self.currect_batch_index = 0
# random.seed(0)
self.transform=transforms.Compose([
transforms.ToPILImage(),
transforms.Limitsize(600),
transforms.RandomMultiChoice([
transforms.RandomRotate(30),
transforms.RandomShear(15,15),
transforms.RandomTranslate((100, 100)),
# transforms.RandomVerticalFlip(),
# transforms.RandomHorizontalFlip(),
],[0.2,0.2,0.2,0.5,0.5]
),
transforms.Zip([
BT.Compose([
BT.ColorJitter(brightness=0.2,contrast=0.2,saturation=0.2,hue=0.2),
BT.RandomApply([IMG.blur],p=0.3),
BT.RandomApply([IMG.edge_enhance],p=0.3)
]),
transforms.Identical(),
]),
transforms.Resize(self.input_size,keep_ratio=True,fillcolor=(0,0,0)),
])
# print(self.num_batches,self.data_pairs)
def _load_annotations(self):
'''
{shapes:[{points:[],label:""}]}
        labeled using labelme
:return:
'''
fs = glob.glob(self.labels_dir + '/*.json')
annotations = []
for i, f in enumerate(fs):
img_path = self.imgs_dir + '/' + os.path.basename(f).replace('.json', '.jpg')
with open(f, 'r') as fp:
dic = json.load(fp)
# objects=[(obj['points'],obj['label']) for obj in dic['shapes']]
objects = [[*obj['points'], self.label2id[obj['label']]] for obj in dic['shapes']]
annotations.append((img_path, objects))
return annotations
def _preprocess(self, img, polygon):
# print(polygon)
img,[points]=self.transform(img,[polygon])
# wpcv.draw_polygon(img,points,width=5).show()
# raise
# print(img.size)
img = BT.cv2img(img)
# h,w=img.shape[:2]
# dst_w,dst_h=self.input_size
# scaleX,scaleY=dst_w/w,dst_h/h
# print(scaleX,scaleY)
# img = cv2.resize(img, (512, 512))
# points=np.array(points)*np.array([scaleX,scaleY])
img = img / 255
img = normalize(img)
# plt.matshow(img)
img = np.transpose(img, (2, 0, 1))
# print(img.shape)
        points = (np.array(points) / 4).astype(int)  # plain int, since np.int is deprecated in newer NumPy
heatmap = draw_points_heatmap(points, (128,128),radius=3)
# plt.matshow(heatmap)
# plt.show()
heatmap=np.expand_dims(heatmap,0)
# raise
return img, heatmap
def __iter__(self):
return self
def __getitem__(self, item):
return self.data_pairs[item]
def __next__(self):
start_point = self.batch_size * self.currect_batch_index
end_point = start_point + self.batch_size
data_pairs = self.data_pairs[start_point:end_point]
batch_image = []
batch_heatmap = []
for i, (img_path, objects) in enumerate(data_pairs):
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
points = []
for obj in objects:
points += obj[:-1]
img, heatmap = self._preprocess(img, points)
# print(img.shape)
batch_image.append(img)
batch_heatmap.append(heatmap)
# print(batch_image)
batch_image = np.array(batch_image).astype(np.float32)
batch_heatmap = np.array(batch_heatmap).astype(np.float32)
if self.currect_batch_index >= self.num_batches - 1:
self.currect_batch_index = 0
random.shuffle(self.data_pairs)
raise StopIteration
else:
self.currect_batch_index += 1
return batch_image, batch_heatmap
def __len__(self):
return self.num_batches
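# --- Editor's usage sketch (not part of the original file) ---------------------
# The class is a batch iterator driven with next(); it reshuffles and raises
# StopIteration at the end of each epoch. Directory names are illustrative, and
# each labelme JSON in labels_dir must have a matching .jpg of the same base
# name in imgs_dir:
#
#     ds = Dataset(imgs_dir="data/imgs", labels_dir="data/labels",
#                  batch_size=8, classes=['0'])
#     batch_image, batch_heatmap = next(ds)
#     # batch_image:   (8, 3, H, W) float32, normalised, channel-first
#     # batch_heatmap: (8, 1, 128, 128) float32 point heatmaps
# --------------------------------------------------------------------------------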
| 34.449664
| 120
| 0.598091
|
79531e879f092a35fa6893f669deb7e3c9b2ecc6
| 23,106
|
py
|
Python
|
test/ltc_test_vectors.py
|
tonymorony/pywallet
|
fe51f2e8c53407001583c6496534bc8b0e221a4b
|
[
"MIT"
] | 16
|
2017-10-13T12:22:36.000Z
|
2021-05-06T08:26:04.000Z
|
test/ltc_test_vectors.py
|
prestonw/pywallet
|
fe51f2e8c53407001583c6496534bc8b0e221a4b
|
[
"MIT"
] | 1
|
2019-03-01T06:05:41.000Z
|
2019-03-01T06:05:41.000Z
|
test/ltc_test_vectors.py
|
prestonw/pywallet
|
fe51f2e8c53407001583c6496534bc8b0e221a4b
|
[
"MIT"
] | 15
|
2017-11-21T22:49:35.000Z
|
2021-04-26T23:22:18.000Z
|
################################################################################################
# Test Functions
################################################################################################
from lib import coin_types
from lib.base import *
coin = coin_types.ltc
class bip32_tests:
def __init__(self):
pass
def execute_all_tests(self):
print "Running BIP 32 Tests:"
print "Running test_vector_1 tests:"
self.test_vector_1()
print "Running test_vector_2 tests:"
self.test_vector_2()
print "Running test_vector_3 tests:"
self.test_vector_3()
def test_vector_1(self):
seed = binascii.unhexlify("000102030405060708090a0b0c0d0e0f")
print "Testing chain m...",
w = parse_path(coin, seed, "m")
assert w.serialize_public() == "Ltub2SSUS19CirucWFod2ZsYA2J4v4U76YiCXHdcQttnoiy5aGanFHCPDBX7utfG6f95u1cUbZJNafmvzNCzZZJTw1EmyFoL8u1gJbGM8ipu491"
assert w.serialize_private() == "Ltpv71G8qDifUiNetP6nmxPA5STrUVmv2J9YSmXajv8VsYBUyuPhvN9xCaQrfX2wo5xxJNtEazYCFRUu5FmokYMM79pcqz8pcdo4rNXAFPgyB4k"
print "passed"
print "Testing chain m/0'...",
w = parse_path(coin, seed, "m/0'")
assert w.serialize_public() == "Ltub2UhtRiSfp82berwLEKkB34QBEt2TUdCDCu4WNzGumvAMwYsxfWjULKsXhADxqy3cuDu3TnqoKJr1xmB8Wb2qzthWAtbb4CutpXPuSU1YMgG", w.serialize_public()
assert w.serialize_private() == "Ltpv73XYpw28ZyVe2zEVyiFnxUZxoKLGQNdZ8NxUi1WcqjNmMBgtLbh3KimGSnPHCoLv1RmvxHs4dnKmo1oXQ8dXuDu8uroxrbVxZPA1gXboYvx", w.serialize_private()
print "passed"
print "Testing chain m/0'/1...",
w = parse_path(coin, seed, "m/0'/1")
assert w.serialize_public() == "Ltub2Wt1dVzZCpufVJymxae3doHqJG1ZUevW9DjLyG3iiYxaB6P6PK9nHtmm7EgYFukxrwX6FDHuRuLVZ4uwyvCjgYXSU6SSXqvATFvgjLDteZ8", w.serialize_public()
assert w.serialize_private() == "Ltpv75hg2ia1xgNhsSGwhy9fZDTcrhKNQQMr4hdKJHHRnNAyajC24Q7MHHfVrqaLoj7xTWXcm7TViVHBvxKkXURWgPPaRdmgvMGpEBUPDQomMoz", w.serialize_private()
print "passed"
print "Testing chain m/0'/1/2'...",
w = parse_path(coin, seed, "m/0'/1/2'")
assert w.serialize_public() == "Ltub2ZVHg2pQuhm5MUmsDB3QzoKyXQt5kCWVUky2DbLstRL1awaDC4zDCLKgfFsNhnCHDTcprbGWoquU1Q4Eh1kGjzgH3zQacnyrAwqppbnDPZ9", w.serialize_public()
assert w.serialize_private() == "Ltpv78Jx5FPsfZE7jc52xZZ2vDVm5rBtfwwqQErzYcaaxEYQzaP8s9wnBjDRQsnxmxdSxyZ1MaQR8u76AA4W7VLhoUqEnFLF5HWkqTDbr5DovYB", w.serialize_private()
print "passed"
print "Testing chain m/0'/1/2'/2...",
w = parse_path(coin, seed, "m/0'/1/2'/2")
assert w.serialize_public() == "Ltub2bigWTwN6BS4RxFauSFVtJVHcEApNnpgvErKUYsMCrtcx3CaFqgaPuncLarm7aM1gmjzzbkTraoaZpQEnKBUTb9XxmxmSysgBdkfyFbascs", w.serialize_public()
assert w.serialize_private() == "Ltpv7AYLugWpr2u6p5Ykepm7oif5AfUdJYG2qikHoa74Gg72Mg1Vvve9PJgM6CCREd2t2mghyVdz3iZFdLxxJut3zsRHBVRLdNLTzRgmMZtMHv7", w.serialize_private()
print "passed"
print "Testing chain m/0'/1/2'/2/1000000000...",
w = parse_path(coin, seed, "m/0'/1/2'/2/1000000000")
assert w.serialize_public() == "Ltub2dSSz9YcDJpFxJ331ypEC1VHQTk8CHdiiVEsiqFVQwH7fAbxnFwEf1wfyQmhxqRjAU2YVwgGPnWBAEoFtAgKJrJeqKNrFTTJzbNbDMUZjYL", w.serialize_public()
assert w.serialize_private() == "Ltpv7CG7PN84yAHJLRLCmNKr7Rf4xu3w8354dy8r3rVCUkVX4oQtTLtoeQqQj3yd9Y9xeB5xkrcvtm6NdWyKqytn7q4pWzBZkH6BGmF86hsLPtJ", w.serialize_private()
print "passed"
def test_vector_2(self):
seed = binascii.unhexlify("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542")
print "Testing chain m...",
w = parse_path(coin, seed, "m")
assert w.serialize_public() == "Ltub2SSUS19CirucVsJx8iwpcE1qAFcXnAy2CsRLhqdcn75wsFbyRRyKe5giFzkEouW3oZrGWDxXykHBi9wDgkDd4vEiqBznyPWLcxwTQjJTyxX", w.serialize_public()
assert w.serialize_private() == "Ltpv71G8qDifUiNeszc7t7TSXeBcigvLhvQN8MKK2rsKqvJMGtQu6WvtdUaT1aozybX3YRdfLGzeXXX6AnVunxk3iX9PJQD4kyhoRd9PcKyWKRK", w.serialize_private()
print "passed"
print "Testing chain m/0...",
w = parse_path(coin, seed, "m/0")
assert w.serialize_public() == "Ltub2ViDhiqACsjh28uFGWKhg2RMXDMnV9TJm3Ahsef4rWwuZE3wzhKPnvFxqTi8bzWV1tLSNSxHNq89sKMuZtv3QWu17EjTsjeikT47quponTX", w.serialize_public()
assert w.serialize_private() == "Ltpv74Xt6wQcxjCjQGCR1tqKbSb95efbQttegX4gCftmvLAJxrrsfnGxnK9hb65ocfMsx5sVEHQNxgwDXJ8jMFF6ekJnJad89TsYZ33wyaWm4kk", w.serialize_private()
print "passed"
print "Testing chain m/0/2147483647'...",
w = parse_path(coin, seed, "m/0/2147483647'")
assert w.serialize_public() == "Ltub2WsGxKrgamuoC17RgxKM9N6uuxah2dczrCFEo3ZqWFiJ1XHQcajhzz3ZSqxFMj7aoQFz2Hotg3YkXzEFLoQA8fMdeKMXETujcqKigf4Rx1P", w.serialize_public()
assert w.serialize_private() == "Ltpv75gwMYS9LdNqa8QbSLpy4nGhUPtVxP4Lmg9D84oYa4vhRA6LHfhGzNwJCSZnLv6MrKCP1Jc21hSKxXsW5crEE3kLjJrTuyboLpPUjzJreuq", w.serialize_private()
print "passed"
print "Testing chain m/0/2147483647'/1...",
w = parse_path(coin, seed, "m/0/2147483647'/1")
assert w.serialize_public() == "Ltub2ZgFNLqckRCzHcnZkcTv7K39FVTC8pYGAdk6e7EAp94jgM9Zv6GQttRwHe9P9bosPaiXrvu8KAracnVGFhq7PzpYEbZNT1LwYbzfw9HojQB", w.serialize_public()
assert w.serialize_private() == "Ltpv78VumZR5WGg2fk5jVzyY2jCvovm14Zyc67e4y8TssxH95yxVbBDytHKg3EmQNdJ4AGZ5kbgQRF7YHEef8WNcEXZds5PGm5aBpcpDDiG4cb2", w.serialize_private()
print "passed"
print "Testing chain m/0/2147483647'/1/2147483646'...",
w = parse_path(coin, seed, "m/0/2147483647'/1/2147483646'")
assert w.serialize_public() == "Ltub2arHHJmyMpAhaa2AV6HTUpjLADvZBoAmCLTzApvaeuKz8hUW1mCRRQo2vJGtLyLKM2NAfRxqMnBSGReewawd7MWzWVss1JSMQq5FU6xuG3b", w.serialize_public()
assert w.serialize_private() == "Ltpv79fwgXMS7fdjxhKLEUo5QEu7ifEN7Yc77pMxVrAHiiYPYLHRgr9zQogmfwVo13gLhqqn4RTPoch86Mk2eTH6vNEoh1vauHbpACpjHV4yMM7", w.serialize_private()
print "passed"
print "Testing chain m/0/2147483647'/1/2147483646'/2...",
w = parse_path(coin, seed, "m/0/2147483647'/1/2147483646'/2")
assert w.serialize_public() == "Ltub2cDKEjzUszUwKqD4DAR9Ta7JZHTfS85qrW5sacH5HhBaCtp5BQ8Bbyk2zhMAUYh1s5aPwMUFFVCRkri9mgdXRcNa9fhwwfj668GS8jig9Sj", w.serialize_public()
assert w.serialize_private() == "Ltpv7B2ydxZwdqwyhxWDxYvmNzH67imUMsXBmyyqudWnMWPycXczrV5kbNdmkMAoA4mQk9hWMB6DFiXp8udYNmeKyLyXVsS5xhjXraAHN6qF1PS", w.serialize_private()
print "passed"
def test_vector_3(self):
seed = binascii.unhexlify("4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be")
print "Testing chain m...",
w = parse_path(coin, seed, "m")
assert w.serialize_public() == "Ltub2SSUS19CirucUvm7f7ScpDGs2twdY38HvDBu78fB3oAjqVqF5aDHbLvEjzHDdW5gyec8LDqYNR739Ta8XTG4VFtrxRu1hwxBQgT9wAsLQiK", w.serialize_public()
assert w.serialize_private() == "Ltpv71G8qDifUiNes44HQVxEjdSebLFSTnZdqh5sS9tt7cP9F8eAkfArajoyVaYAvckNd8JQuCgkRxFoXS7E2PtUVzedpgmhxR3miLokGHs2qhF", w.serialize_private()
print "passed"
print "Testing chain m/0'...",
w = parse_path(coin, seed, "m/0'")
assert w.serialize_public() == "Ltub2UogAxyLQEGxgTrWtFn7sU2XjFZ1gtYCKhRqvNM1z7676Wxhf4z9fohx3i7jMrgSB26y7BbHDNJXmvWUThCF4WAodXquFoSytrp4bdcg2zD", w.serialize_public()
assert w.serialize_private() == "Ltpv73dLaBYoA5k14b9gdeHjntCKHgrpcdyYFBKpFPaj3vJWW9mdL9wifCbgoLtXGa5J5Gwpv9Ud5gtGAykNbcUEFkrGFpgkhaKaqMmQDeHwDdo", w.serialize_private()
print "passed"
class bip39_tests:
def __init__(self):
pass
def execute_all_tests(self):
print "Running BIP 39 Tests:"
encoder = mnemonic()
passphrase = "TREZOR"
entropy = "0000000000000000000000000000000000000000000000000000000000000000"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNet1Um2aWvJmgmY8kY8kFT9pPASdWy7L5jWn9KmaJzQJpdTGLERnF5WXww9CkEkbXC1XqWAep1ix3gpg6qLx5hq9Ly8WnWnGn"
print "passed"
entropy = "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "bc09fca1804f7e69da93c2f2028eb238c227f2e9dda30cd63699232578480a4021b146ad717fbb7e451ce9eb835f43620bf5c514db0f8add49f5d121449d3e87"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNetWfT4va83aAe3DmJ1yGY59gSgyEnMGPBrrh2iCDk1NQqXWy9Bg9nXNsr4ESecwGYkbwq17F75fFMqXtYee6YkWWh9GFYqM1"
print "passed"
entropy = "8080808080808080808080808080808080808080808080808080808080808080"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "c0c519bd0e91a2ed54357d9d1ebef6f5af218a153624cf4f2da911a0ed8f7a09e2ef61af0aca007096df430022f7a2b6fb91661a9589097069720d015e4e982f"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNetB6MrGapBn3uN6kp3e8g7CdFCYRb4XT9dXHFdqJiMEya861TiD7qx2xZFy5kAvsgpBBfKvop5phqx6BeSH3kBB7HePfFskW"
print "passed"
entropy = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "dd48c104698c30cfe2b6142103248622fb7bb0ff692eebb00089b32d22484e1613912f0a5b694407be899ffd31ed3992c456cdf60f5d4564b8ba3f05a69890ad"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNesUtpSzZkDsF9Q6FVwGdHWFwC8bQzegkNx65TdRC5j5jmXXksm1W3FYsCgR5AtcKSYTvdj3QpYZmKmbFtgD6ZR82t3wGS1Lf"
print "passed"
entropy = "68a79eaca2324873eacc50cb9c6eca8cc68ea5d936f98787c60c7ebc74e6ce7c"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "64c87cde7e12ecf6704ab95bb1408bef047c22db4cc7491c4271d170a1b213d20b385bc1588d9c7b38f1b39d415665b8a9030c9ec653d75e65f847d8fc1fc440"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNesW6k9sufxmbMU3Ux6FPQsZ3wtcnNa8yngqz169XYsFePR5CtdSmzahhHnf1uBhB2KU4pJmHFvTmWkXquEQD8tJ69tuYHucD"
print "passed"
entropy = "9f6a2878b2520799a44ef18bc7df394e7061a224d2c33cd015b157d746869863"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "72be8e052fc4919d2adf28d5306b5474b0069df35b02303de8c1729c9538dbb6fc2d731d5f832193cd9fb6aeecbc469594a70e3dd50811b5067f3b88b28c3e8d"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNesV2MmfvW4ym6LMx2w3o3hTS2QyMuWvdxB59cZvvvzUVMtpF4qmTNWXjmpazbzYxuqZyMzMVvAntQoRt2vbmgzw2Cf5SKL4g"
print "passed"
entropy = "066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "26e975ec644423f4a4c4f4215ef09b4bd7ef924e85d1d17c4cf3f136c2863cf6df0a475045652c57eb5fb41513ca2a2d67722b77e954b4b3fc11f7590449191d"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNetptFHLPgYyeExypmXWDVMtiRaFxdjCKf4FNM5g5wb1iE5B4KgJPAL92HXEAzGvx9R2UZYecX8Yvz4skeqaS6EouN2F9bKCi"
print "passed"
entropy = "f585c11aec520db57dd353c69554b21a89b20fb0650966fa0a9d6f74fd989d8f"
print "Testing entropy " + entropy + "...",
phrase = encoder.encode(binascii.unhexlify(entropy))
assert phrase == "void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold"
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "01f5bced59dec48e362f2c45b5de68b9fd6c92c6634f44d6d40aab69056506f0e35524a518034ddc1192e1dacd32c1ed3eaa3c3b131c88ed8e7e54c49a5d0998"
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNet8WMrCoQxhEFReiAwzSWFpcRfhzvr2Vq9psgbdkra47QbUzwF7VDAenq52LEWntk33tjnHZ1SWLi8b12LdAepNLD9uAgfRx"
print "passed"
class bip44_tests:
def __init__(self):
pass
def execute_all_tests(self):
print "Running BIP 44 Tests:"
print "Running test_vector_1 tests:"
self.test_vector_1()
print "Running test_vector_2 tests:"
self.test_vector_2()
print "Running test_vector_3 tests:"
self.test_vector_3()
def test_vector_1(self):
encoder = mnemonic()
phrase = "cover tube shrug thought trick scout extra orphan spin banana civil error hockey ranch vivid round logic stable brass error fork duck bomb soup"
passphrase = ""
print "Testing phrase: " + phrase + "...",
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "a12e010b3cfad9dff772d098d101558171ecda084cd93417f9ecce999bfdeab251b9fa6427fb095bcd2dc07a8507b4bb0585574eb029bddeeeae70cb1bb1c741"
# BIP 32 Root Key
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNet4wgQfd6fB4xSj94FnpFTS7Ue5zt6vzN5tJ5vA7oUxuRW4kfQbEcXNWH9G2c5HsriikbURxPyTSgeAfauccZLBhAAHHPwnd"
# Account Extended Keys
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'")
assert w.serialize_private() == "Ltpv774HJN2KE7tnapjYF54TN8EfEao1hRst1dsE7Jbs3nLQ8pJvTXe2HJ4BpbkBysxoehKor68a3HSamjA4i1FnS9JwVznEzPJJywA7bPzgaWA"
assert w.serialize_public() == "Ltub2YEcu9SrUGRkChSNVgYqSi4sg9VCmgSY69yFnHN9yy7zjBVznSgTHuAT4x4pD9mZLew5yn7vjqMLxumUpEqGjmTEve8F7tHqnwSpPPL5K58"
# BIP 32 Extended Keys
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0")
assert w.serialize_private() == "Ltpv79GhE9tvGSHVcmYYo7vArZEGaZf4wvCG32SyoHnRghNkpezskrjE5bPrCbvKjzFuPqbzC88T5RJixyKKjGs9x7odMjhPQuvqkzSmUjGt8KF"
assert w.serialize_public() == "Ltub2aT2pwKTWapTEeFP3jQYw94V28MG2Akv7YZ1UGYictAMR2Bx5mmf6CW7T1LntWDikEgL3g8puzRy2MGVg83FAbw2BGYQz9qjaXvXyiHt3VS"
# Address 0
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/0")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "LMmZDN5g66qUUwGm63gBrhcL4Mo8b6wsxH"
assert binascii.hexlify(ser_p(w.get_public_key())) == "031cb76d4373e0724c94d4c3b90796c77013b666b3846cfe501f46cdba7d074e52"
assert wallet(coin, w.get_private_key()).get_wif(True) == "T6TREaHcC2X5u2XExUb6h8bSkeLJooNSD1aXVQrooBV5XNf53fEp"
# Address 1
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/1")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "LcU5ynFcA66eiYsss6jzRiXxRTsnAioz8n"
assert binascii.hexlify(ser_p(w.get_public_key())) == "021fce1230f72f747e757165c50da11d1758c6afe15dc681bfbfa42b6279870e97"
assert wallet(coin, w.get_private_key()).get_wif(True) == "T76xTmqLqFndu68VenCFMNFfFVDzsMKKM5wVFzThizP3QirvzpPY"
# Address 2
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/2")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "Lc6nqe7uNnXooU1zsvrCuyogmgUEsrKidS"
assert binascii.hexlify(ser_p(w.get_public_key())) == "02a0dcdff5eb706e4a025c4db767e44ce1df2a047de7a767560cb2581042cd6630"
assert wallet(coin, w.get_private_key()).get_wif(True) == "T8Hc5iP2Y5aAeRGm5A4kwtbdXWHStonC9FEWxjRfvJBH91kuhqL3"
print "passed"
def test_vector_2(self):
encoder = mnemonic()
phrase = "swallow vivid casino crisp memory library employ mystery worry neglect hold alarm fix spoil correct spin claw vivid trust base subject mixed assist brief"
passphrase = "pazzw0rd"
print "Testing phrase: " + phrase + "...",
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "39f1e55f86f11067b555185d2e6d86cc813b51563f54e483607a7ed471db118eb00b385d2fb34fd5faf79e74cf35ba1c98522c7790c66f27c973768dcc086a9d"
# BIP 32 Root Key
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNes9gjQUsjMD3P578XHgoQS43vGXXVHNTLJeTQPvnX9WRiem5bVQQ4SWqfTCtAt8r5fok7tTfK3m3uTeuin6J1wFTEyVpKpUB"
# Account Extended Keys
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'")
assert w.serialize_private() == "Ltpv77MVahmVhSqhJBVihyTaUJqpvaLKTXnE65GHR1JDqZk5DJr8jHGkc3zx5M8qnVpAV3929fFCfhqgRAt96gLn5hg4cDLkxJXJHpgtc2wAhhu"
assert w.serialize_public() == "Ltub2YXqBVC2wbNev4CYxawxYtg3N92WXnLtAbNK5z4WmkXfog3D4CKBcf7DKjqJbRvRL1xQdghafrmGRQjt6gwCPHrkyhvdV9meK5CxQ5iiM7t"
# BIP 32 Extended Keys
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0")
assert w.serialize_private() == "Ltpv796nKbPLtoaaLbGxotku9LBuGfGgo4Begr8i394UVntbHvSFsiQjsuvUv2RoEWrSXFmvjGXzsxBnUqZR2eDnJVFtsk6FRLpaz9n8JbcycpT"
assert w.serialize_public() == "Ltub2aH7vNot8x7XxTyo4WFHDv27iDxssJkJmNEji7pmRygBtHdLCdTAtX2kASCwUT6hTWUWoTFwUq8ET3pZ2MhZbkobpsg99QE7ERNiz4XZbtH"
# Address 0
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/0")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "LUBcSCMewAGstz14hbmYdwgkErZy85uGPm"
assert binascii.hexlify(ser_p(w.get_public_key())) == "0226624b4f8e91645251f4b16a0d4ce81cefcf5af2bcfa216f5696d1ee01d1a06f"
assert wallet(coin, w.get_private_key()).get_wif(True) == "T8mZQbKmZDkUfu4epuhxJDLojhzKUzKJjdZm72jST94RXHrwtLUj"
# Address 1
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/1")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "Lc61u3FD42GMXKfQMTnG1FAh6zFWUfQE5o"
assert binascii.hexlify(ser_p(w.get_public_key())) == "03693801444c1cded793f38571b954cd30bd12319ecdbec6ee14ed67a9f6640c77"
assert wallet(coin, w.get_private_key()).get_wif(True) == "TAfLvSPhewck1XhtMYJubyqqAbCCp5khC3cPyR5jEMeQ1zoNhRVk"
# Address 2
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/2")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "LWoNycg8pKVPrrnVRpe6fe7X74dQ3gN3iR"
assert binascii.hexlify(ser_p(w.get_public_key())) == "02c648c12f74bab045e12269c5c8ac8b0890bcc67546f8e5c0e77926021e1438e7"
assert wallet(coin, w.get_private_key()).get_wif(True) == "TARc5t58Qs6BfYdhgCSYJzVDr3uCFM8539by1359aDtkdqS67gAs"
print "passed"
def test_vector_3(self):
encoder = mnemonic()
phrase = "nominee twelve around island erupt square smoke half riot wire unable shift wave vote shock acid roof safe resemble giant front radar notice result"
passphrase = "hi"
print "Testing phrase: " + phrase + "...",
seed = binascii.hexlify(encoder.generate_seed(phrase, passphrase))
assert seed == "9fc772226b0f66662d78208595b0f8ff0977b43bfe35a02a8efef4234fcee7e9518eb77847cf7cc37d881d52d4ffe132ab96f10f5505ceb38f085f9b9a88986f"
# BIP 32 Root Key
w = parse_path(coin, binascii.unhexlify(seed), 'm')
assert w.serialize_private() == "Ltpv71G8qDifUiNesWXVFbVpmk95cSqN9ei5pjUkWV4148v47J3KCvfZnWseLeEQy8EZW7awQ1RDNPyuWoMSmR2BSbWM65AcGRGV1t2r7JTxhpj"
# Account Extended Keys
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'")
assert w.serialize_private() == "Ltpv78mnhbvapquk7bLwY8bpCdkk8ctqW1VpdF59Hp6X2VfAf84y4mPtZfHG9YDkKXMSFiXu2BQ5ZWFkmZvuQtPFbZFJ2yKzy4d2Pxu1TfkXDYs"
assert w.serialize_public() == "Ltub2Zx8JPM84zShjU3mnk6CHDaxaBb2aG4UhmBAxnroxgSmFVG3PgSKaGPXPtT79oPrpofb23CHComio7XJTjEPKN8ADbXKv9D6pXQgJULF1zg"
# BIP 32 Extended Keys
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0")
assert w.serialize_private() == "Ltpv79GuDnEECVxDZkdRaZWykezA9cco9RkdcWGu7neTHQ6gGTdda6MmyUcbqGNrEndxoR2BfAZndRNSjocYmqow5wLyy61AYbUFvPAPhU4ce8Z"
assert w.serialize_public() == "Ltub2aTEpZemSeVBBdLFqB1MqEpNbBJzDgKHh2NvnmQkDatGrpphu1QCz5is5eiix68287NGSkoqUmgsYjZEhxHY7iaRMdzFD9t7NugbB2zwM1D"
# Address 0
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/0")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "LbKThttYijLBPkewZ1ozZqJD1ytNveoUZc"
assert binascii.hexlify(ser_p(w.get_public_key())) == "030124b907c411c4b481dfae1b090ca89608d6e4b8c33b8b61fce6377c870f0919"
assert wallet(coin, w.get_private_key()).get_wif(True) == "T4aYVxxW8QuyEaMu16Lqojbx3V8RHfPFedoDJrnJRcAZ8vWWbXBP"
# Address 1
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/1")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "LUNXdUau8MdsoqgEh7am6DsHTNipapsArF"
assert binascii.hexlify(ser_p(w.get_public_key())) == "0362f57c3dce07d456680f5366b89158c391b38935f5513b1c712d83da73853eeb"
assert wallet(coin, w.get_private_key()).get_wif(True) == "TAqj9bzkTGmzxqALpFpXQ7t21jEPku3C625UW4TjsdP9uKUx2RaH"
# Address 2
w = parse_path(coin, binascii.unhexlify(seed), "m/44'/2'/0'/0/2")
assert wallet(coin, w.get_private_key()).get_address_as_b58(True) == "LayaSMF44QgptLTZKUiyYZPfv5rqVdkQxT"
assert binascii.hexlify(ser_p(w.get_public_key())) == "0333b608d523e51bad91dfec89fb964031f590a798350abbd61f58990077549006"
assert wallet(coin, w.get_private_key()).get_wif(True) == "T8MqFrXnb17eqTexYzA8aemdAff58EkXuQ3wSQnB5J7jW3rxfKY4"
print "passed"
| 65.82906
| 210
| 0.787241
|
79531fa9089551b8586bdca2a7af7b9ed8c4d8fc
| 3,462
|
py
|
Python
|
utils/misc.py
|
Tensor-Reloaded/Pytorch-Template
|
32dd17885de711d46433d9d59f4924d7d29735a4
|
[
"AFL-3.0"
] | 1
|
2021-12-29T10:52:08.000Z
|
2021-12-29T10:52:08.000Z
|
utils/misc.py
|
Tensor-Reloaded/Pytorch-Template
|
32dd17885de711d46433d9d59f4924d7d29735a4
|
[
"AFL-3.0"
] | 13
|
2021-05-10T11:38:06.000Z
|
2022-01-08T12:28:03.000Z
|
utils/misc.py
|
Tensor-Reloaded/Pytorch-Template
|
32dd17885de711d46433d9d59f4924d7d29735a4
|
[
"AFL-3.0"
] | null | null | null |
import sys
import time
import os
import pathlib
import zipfile
from hydra.core.hydra_config import HydraConfig
TOTAL_BAR_LENGTH = 80
LAST_T = time.time()
BEGIN_T = LAST_T
def progress_bar(current, total, msg=None):
global LAST_T, BEGIN_T
if current == 0:
BEGIN_T = time.time() # Reset for new bar.
current_len = int(TOTAL_BAR_LENGTH * (current + 1) / total)
rest_len = int(TOTAL_BAR_LENGTH - current_len) - 1
sys.stdout.write(' %d/%d' % (current + 1, total))
sys.stdout.write(' [')
for i in range(current_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
current_time = time.time()
step_time = current_time - LAST_T
LAST_T = current_time
total_time = current_time - BEGIN_T
time_used = ' Step: %s' % format_time(step_time)
time_used += ' | Tot: %s' % format_time(total_time)
if msg:
time_used += ' | ' + msg
msg = time_used
sys.stdout.write(msg)
if current < total - 1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def begin_chart(chart_name, x_axis_name,save_path=None):
if save_path is not None:
with open(os.path.join(save_path,chart_name + '.tsv'),"w") as fd:
fd.write(str(x_axis_name)+"\t"+chart_name+"\n")
print(f'{{"chart":"{chart_name}", "axis": "{x_axis_name}"}}')
def begin_per_epoch_chart(chart_name,save_path=None):
begin_chart(chart_name, 'Epoch',save_path=save_path)
def add_chart_point(chart_name, x, y,save_path=None):
if save_path is not None:
with open(os.path.join(save_path,chart_name + '.tsv'),"a+") as fd:
fd.write(str(x)+"\t"+str(y)+"\n")
print(f'{{"chart": "{chart_name}", "x":{x}, "y":{y}}}')
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def save_current_code(path: str):
print(f"Saving current code to {path}")
project_root = HydraConfig.get().runtime.cwd
unwanted_dirs = ["venv", f"utils{os.path.sep}__pycache__",
"outputs", "results", ".idea", ".git", "runs", f"models{os.path.sep}__pycache__", "data"]
unwanted_extensions = ["", "txt", "md"]
with zipfile.ZipFile(os.path.join(path, "files.zip"), "w", zipfile.ZIP_DEFLATED) as z:
for root, dirs, files in os.walk(project_root):
root = root.replace(project_root, "").lstrip(os.path.sep)
            if any(root.startswith(x) for x in unwanted_dirs):
continue
for file in files:
if file.split(".")[-1] in unwanted_extensions:
continue
z.write(os.path.join(project_root, root, file), os.path.join(root, file))
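# --- Editor's usage sketch (not part of the original file) ---------------------
# progress_bar is meant to be called once per step inside a loop; a tiny demo
# (assuming the hydra import at the top of this module resolves) could be:
if __name__ == "__main__":
    for step in range(20):
        time.sleep(0.05)  # simulate one unit of work
        progress_bar(step, 20, msg="demo loss: %.3f" % (1.0 / (step + 1)))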
| 30.637168
| 110
| 0.583478
|
79531fabab990cf534be190b44743f52f38ed532
| 679
|
py
|
Python
|
setup.py
|
OtakoidTony/HentaiCrawler.py
|
9c599036fd5754319641f0e865555bed7664cd05
|
[
"MIT"
] | 1
|
2021-08-20T05:28:21.000Z
|
2021-08-20T05:28:21.000Z
|
setup.py
|
OtakoidTony/HentaiCrawler.py
|
9c599036fd5754319641f0e865555bed7664cd05
|
[
"MIT"
] | null | null | null |
setup.py
|
OtakoidTony/HentaiCrawler.py
|
9c599036fd5754319641f0e865555bed7664cd05
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open("README.md", 'rt', encoding='UTF8') as rm:
long_description = rm.read()
setup(
name = "HentaiCrawler",
version = "0.1.0",
packages = ["HentaiCrawler",],
license = "MIT",
author='Rojiku, OtakoidTony, Park Hyun',
url = "https://github.com/OtakoidTony/HentaiCrawler.py",
python_requires = ">=3.5",
platforms = ['Windows', "Linux", "OSX"],
description = "Hentai Website Crawling Library",
long_description = long_description,
long_description_content_type = "text/markdown",
keywords = "Hentai Crawling Hitomi",
install_requires = [
"requests",
"BeautifulSoup4"
]
)
| 28.291667
| 60
| 0.643594
|
7953204f7599a62db554bcbcf9b7ea17f5670645
| 1,743
|
py
|
Python
|
src/python/CSVComparisons/parse_csv.py
|
carrliitos/NLPInformationExtraction
|
64c9a72b80f0a80afeafdb3db625ac19449f7cf9
|
[
"Apache-2.0"
] | 5
|
2020-09-25T07:44:21.000Z
|
2022-03-16T06:55:21.000Z
|
src/python/CSVComparisons/parse_csv.py
|
carrliitos/NLPInformationExtraction
|
64c9a72b80f0a80afeafdb3db625ac19449f7cf9
|
[
"Apache-2.0"
] | null | null | null |
src/python/CSVComparisons/parse_csv.py
|
carrliitos/NLPInformationExtraction
|
64c9a72b80f0a80afeafdb3db625ac19449f7cf9
|
[
"Apache-2.0"
] | 2
|
2020-12-17T03:03:49.000Z
|
2021-03-20T12:00:41.000Z
|
import csv
from itertools import zip_longest
with open('TEST1.csv', 'r', newline='') as csv_file1, open('TEST2.csv', 'r', newline='') as csv_file2:
csv_reader1 = csv.reader(csv_file1)
csv_reader2 = csv.reader(csv_file2)
csv1_length = 0
csv2_length = 0
next(csv_reader1)
next(csv_reader2)
word1_array = []
word2_array = []
for line in csv_reader1:
for words in line:
word1_array.append(words)
word_length = len(words)
# print(f"{words}: {word_length}")
for line in csv_reader2:
for words in line:
word2_array.append(words)
word_length = len(words)
# print(f"{words}: {word_length}")
print(f"{word1_array}\n{word2_array}")
print(f"CSV 1 length: {str(csv1_length)}")
print(f"CSV 2 length: {str(csv2_length)}")
for (array1, array2) in zip_longest(word1_array, word2_array):
print(array1, array2)
# Loop over/consume the header
# next(csv_reader1)
# next(csv_reader2)
# write a new csv file
# with open('new_test.csv', 'w') as new_file:
# csv_writer = csv.writer(new_file, delimiter='\t')
# for line1 in csv_reader1:
# csv_writer.writerow(line1)
# # print(line1[0])
# for line2 in csv_reader2:
# csv_writer.writerow(line2)
# # print(line[0])
################################################
# for line1 in csv_reader1:
# data1 = line1
# # print(data1)
# # print(line1[0])
# for line2 in csv_reader2:
# data2 = line2
# # print(line[0])
# if data1[0] == data2[0]:
# print("Hello!")
# else:
# print("NOT HELLO!")
################################################
# for data1 in csv_reader1:
# if data1[1] == "Attribute2":
# for row in csv_reader2:
# if row[0] == data1[0]:
# print(row[1])
################################################
| 22.636364
| 102
| 0.601262
|
7953211c639cb7964b1e2af495941518988aee2c
| 3,334
|
py
|
Python
|
api/synchronous/example/locustfile.py
|
shelviaandi/CameraTraps
|
5263a50261f29c1b0b1db522f3d96c9657fcdabc
|
[
"MIT"
] | null | null | null |
api/synchronous/example/locustfile.py
|
shelviaandi/CameraTraps
|
5263a50261f29c1b0b1db522f3d96c9657fcdabc
|
[
"MIT"
] | null | null | null |
api/synchronous/example/locustfile.py
|
shelviaandi/CameraTraps
|
5263a50261f29c1b0b1db522f3d96c9657fcdabc
|
[
"MIT"
] | 1
|
2020-10-26T15:33:25.000Z
|
2020-10-26T15:33:25.000Z
|
import io
import json
import os
import random
from locust import HttpLocust, TaskSet, task
from requests_toolbelt.multipart import decoder
from PIL import Image
"""
Load testing using Locust.
Installation instructions: https://docs.locust.io/en/stable/quickstart.html
Once Locust is installed, to run the tests:
locust --host=http://example.com/api/
and visit http://127.0.0.1:8089 in a browser (local testing)
"""
sample_input_dir = './sample_input/test_images'
test_image_names = sorted(os.listdir(sample_input_dir))
test_image_paths = [os.path.join(sample_input_dir, image_name) for image_name in test_image_names if
image_name.lower().endswith('.jpg')]
params = {
'confidence': 0.8,
'render': True
}
headers = {
'Ocp-Apim-Subscription-Key': os.environ.get('API_KEY', '')
}
class UserBehavior(TaskSet):
# @task
# def check_model_version(self):
# self.client.get('model_version', headers=headers, name='model_version')
@staticmethod
def get_test_image():
image_i = random.randint(0, 9) # we have 10 test images
image_name = test_image_names[image_i]
image_path = test_image_paths[image_i]
return image_name, (image_name, open(image_path, 'rb'), 'image/jpeg')
@staticmethod
def open_detection_results(response):
results = decoder.MultipartDecoder.from_response(response)
text_results = {}
images = {}
for part in results.parts:
            # part is a BodyPart object with b'Content-Type' and b'Content-Disposition'; the latter includes the 'name' and 'filename' info
headers = {}
for k, v in part.headers.items():
headers[k.decode(part.encoding)] = v.decode(part.encoding)
if headers.get('Content-Type', None) == 'image/jpeg':
# images[part.headers['filename']] = part.content
c = headers.get('Content-Disposition')
image_name = c.split('name="')[1].split('"')[
                    0]  # the filename and name info arrive in one Content-Disposition string with no obvious format
image = Image.open(io.BytesIO(part.content))
images[image_name] = image
elif headers.get('Content-Type', None) == 'application/json':
text_result = json.loads(part.content.decode())
print(text_result)
for img_name, img in sorted(images.items()):
print(img_name)
img.close()
print()
@task
def request_detection(self):
num_to_upload = random.randint(1, 8) # API accepts 1 to 8 images
files = {}
for i in range(num_to_upload):
image_name, file_item = UserBehavior.get_test_image()
files[image_name] = file_item
response = self.client.post('detect', name='detect:num_images:{}'.format(num_to_upload),
params=params,
files=files,
headers=headers)
UserBehavior.open_detection_results(response)
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 1000 # only one task (request_detection, with model_version commented out), so this doesn't take effect.
max_wait = 1000
| 33.34
| 135
| 0.621776
|
7953222cddce59d43f43fcc0845728b081afeafb
| 3,107
|
py
|
Python
|
sdk/synapse/azure-synapse-spark/setup.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | null | null | null |
sdk/synapse/azure-synapse-spark/setup.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 1
|
2021-06-07T06:37:28.000Z
|
2021-06-07T06:37:28.000Z
|
sdk/synapse/azure-synapse-spark/setup.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-synapse-spark"
PACKAGE_PPRINT_NAME = "Synapse Spark"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
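# (Illustrative note) the regex above matches a module line such as
#     VERSION = "1.0.0"
# and captures the quoted value into `version`.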
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
include_package_data=True,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.synapse',
]),
install_requires=[
'msrest>=0.5.0',
'azure-common~=1.1',
'azure-core>=1.6.0,<2.0.0',
],
extras_require={
":python_version<'3.0'": ['azure-synapse-nspkg'],
}
)
| 33.408602
| 91
| 0.606695
|
795322a34c37b5ced578163aa6c0503e8f579e38
| 5,410
|
py
|
Python
|
tests/test_ikala.py
|
marypilataki/mirdata
|
78981e1f1e7b8661e2d04de0dd5640981bbb1881
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_ikala.py
|
marypilataki/mirdata
|
78981e1f1e7b8661e2d04de0dd5640981bbb1881
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_ikala.py
|
marypilataki/mirdata
|
78981e1f1e7b8661e2d04de0dd5640981bbb1881
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
from mirdata import ikala, utils
from tests.test_utils import run_track_tests
def test_track():
default_trackid = '10161_chorus'
data_home = 'tests/resources/mir_datasets/iKala'
track = ikala.Track(default_trackid, data_home=data_home)
expected_attributes = {
'track_id': '10161_chorus',
'audio_path': 'tests/resources/mir_datasets/iKala/'
+ 'Wavfile/10161_chorus.wav',
'song_id': '10161',
'section': 'chorus',
'singer_id': '1',
'f0_path': 'tests/resources/mir_datasets/iKala/PitchLabel/10161_chorus.pv',
'lyrics_path': 'tests/resources/mir_datasets/iKala/Lyrics/10161_chorus.lab',
}
expected_property_types = {'f0': utils.F0Data, 'lyrics': utils.LyricData}
assert track._track_paths == {
'audio': ['Wavfile/10161_chorus.wav', '278ae003cb0d323e99b9a643c0f2eeda'],
'pitch': ['PitchLabel/10161_chorus.pv', '0d93a011a9e668fd80673049089bbb14'],
'lyrics': ['Lyrics/10161_chorus.lab', '79bbeb72b422056fd43be4e8d63319ce'],
}
run_track_tests(track, expected_attributes, expected_property_types)
# test audio loading functions
vocal, sr_vocal = track.vocal_audio
assert sr_vocal == 44100
assert vocal.shape == (44100 * 2,)
instrumental, sr_instrumental = track.instrumental_audio
assert sr_instrumental == 44100
assert instrumental.shape == (44100 * 2,)
# make sure we loaded the correct channels to vocal/instrumental
# (in this example, the first quarter second has only instrumentals)
assert np.mean(np.abs(vocal[:8820])) < np.mean(np.abs(instrumental[:8820]))
mix, sr_mix = track.mix_audio
assert sr_mix == 44100
assert mix.shape == (44100 * 2,)
assert np.array_equal(mix, instrumental + vocal)
def test_to_jams():
data_home = 'tests/resources/mir_datasets/iKala'
track = ikala.Track('10161_chorus', data_home=data_home)
jam = track.to_jams()
lyrics = jam.search(namespace='lyric')[0]['data']
assert [lyric.time for lyric in lyrics] == [0.027, 0.232]
assert [lyric.duration for lyric in lyrics] == [0.20500000000000002, 0.736]
assert [lyric.value for lyric in lyrics] == ['JUST', 'WANNA']
assert [lyric.confidence for lyric in lyrics] == [None, None]
f0s = jam.search(namespace='pitch_contour')[0]['data']
assert [f0.time for f0 in f0s] == [0.016, 0.048]
assert [f0.duration for f0 in f0s] == [0.0, 0.0]
assert [f0.value for f0 in f0s] == [
{'frequency': 0.0, 'index': 0, 'voiced': False},
{'frequency': 260.946404518887, 'index': 0, 'voiced': True},
]
assert [f0.confidence for f0 in f0s] == [0.0, 1.0]
def test_load_f0():
# load a file which exists
f0_path = 'tests/resources/mir_datasets/iKala/PitchLabel/10161_chorus.pv'
f0_data = ikala.load_f0(f0_path)
# check types
assert type(f0_data) == utils.F0Data
assert type(f0_data.times) is np.ndarray
assert type(f0_data.frequencies) is np.ndarray
assert type(f0_data.confidence) is np.ndarray
# check values
assert np.array_equal(f0_data.times, np.array([0.016, 0.048]))
assert np.array_equal(f0_data.frequencies, np.array([0.0, 260.946404518887]))
assert np.array_equal(f0_data.confidence, np.array([0.0, 1.0]))
def test_load_lyrics():
# load a file without pronunciations
lyrics_path_simple = 'tests/resources/mir_datasets/iKala/Lyrics/10161_chorus.lab'
lyrics_data_simple = ikala.load_lyrics(lyrics_path_simple)
# check types
assert type(lyrics_data_simple) is utils.LyricData
assert type(lyrics_data_simple.start_times) is np.ndarray
assert type(lyrics_data_simple.end_times) is np.ndarray
assert type(lyrics_data_simple.lyrics) is np.ndarray
assert type(lyrics_data_simple.pronunciations) is np.ndarray
# check values
assert np.array_equal(lyrics_data_simple.start_times, np.array([0.027, 0.232]))
assert np.array_equal(lyrics_data_simple.end_times, np.array([0.232, 0.968]))
assert np.array_equal(lyrics_data_simple.lyrics, np.array(['JUST', 'WANNA']))
assert np.array_equal(lyrics_data_simple.pronunciations, np.array([None, None]))
# load a file with pronunciations
lyrics_path_pronun = 'tests/resources/mir_datasets/iKala/Lyrics/10164_chorus.lab'
lyrics_data_pronun = ikala.load_lyrics(lyrics_path_pronun)
# check types
assert type(lyrics_data_pronun) is utils.LyricData
assert type(lyrics_data_pronun.start_times) is np.ndarray
assert type(lyrics_data_pronun.end_times) is np.ndarray
assert type(lyrics_data_pronun.lyrics) is np.ndarray
assert type(lyrics_data_pronun.pronunciations) is np.ndarray
# check values
assert np.array_equal(lyrics_data_pronun.start_times, np.array([0.021, 0.571]))
assert np.array_equal(lyrics_data_pronun.end_times, np.array([0.189, 1.415]))
assert np.array_equal(lyrics_data_pronun.lyrics, np.array(['ASDF', 'EVERYBODY']))
assert np.array_equal(lyrics_data_pronun.pronunciations, np.array(['t i au', None]))
def test_load_metadata():
data_home = 'tests/resources/mir_datasets/iKala'
metadata = ikala._load_metadata(data_home)
assert metadata['data_home'] == data_home
assert metadata['10161'] == '1'
assert metadata['21025'] == '1'
metadata_none = ikala._load_metadata('asdf/asdf')
assert metadata_none is None
| 39.202899
| 88
| 0.707579
|
7953237757066a1e307a88164071dd53e0b5748e
| 22
|
py
|
Python
|
Data/__init__.py
|
himammz/GpTest
|
66d6158fe3729cd26ee89b5d4531a45ef80b4bb4
|
[
"Apache-2.0"
] | 1
|
2017-05-03T17:45:58.000Z
|
2017-05-03T17:45:58.000Z
|
Data/__init__.py
|
LobnaMazhar/test-Python_heroku_webhook
|
cf51971646a3481cc89bbfef17e5816f8684448d
|
[
"Apache-2.0"
] | null | null | null |
Data/__init__.py
|
LobnaMazhar/test-Python_heroku_webhook
|
cf51971646a3481cc89bbfef17e5816f8684448d
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ['Database']
| 22
| 22
| 0.681818
|
7953243c5927c2f75d4af44559f4faacd3d719b5
| 4,262
|
py
|
Python
|
sdk/python/pulumi_azure/automation/get_string_variable.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/automation/get_string_variable.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/automation/get_string_variable.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetStringVariableResult:
"""
A collection of values returned by getStringVariable.
"""
def __init__(__self__, automation_account_name=None, description=None, encrypted=None, id=None, name=None, resource_group_name=None, value=None):
if automation_account_name and not isinstance(automation_account_name, str):
raise TypeError("Expected argument 'automation_account_name' to be a str")
__self__.automation_account_name = automation_account_name
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
__self__.description = description
"""
The description of the Automation Variable.
"""
if encrypted and not isinstance(encrypted, bool):
raise TypeError("Expected argument 'encrypted' to be a bool")
__self__.encrypted = encrypted
"""
Specifies if the Automation Variable is encrypted. Defaults to `false`.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
The provider-assigned unique ID for this managed resource.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if value and not isinstance(value, str):
raise TypeError("Expected argument 'value' to be a str")
__self__.value = value
"""
The value of the Automation Variable as a `string`.
"""
class AwaitableGetStringVariableResult(GetStringVariableResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetStringVariableResult(
automation_account_name=self.automation_account_name,
description=self.description,
encrypted=self.encrypted,
id=self.id,
name=self.name,
resource_group_name=self.resource_group_name,
value=self.value)
def get_string_variable(automation_account_name=None,name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing Automation String Variable.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.automation.get_string_variable(name="tfex-example-var",
resource_group_name="tfex-example-rg",
automation_account_name="tfex-example-account")
pulumi.export("variableId", example.id)
```
:param str automation_account_name: The name of the automation account in which the Automation Variable exists.
:param str name: The name of the Automation Variable.
:param str resource_group_name: The Name of the Resource Group where the automation account exists.
"""
__args__ = dict()
__args__['automationAccountName'] = automation_account_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:automation/getStringVariable:getStringVariable', __args__, opts=opts).value
return AwaitableGetStringVariableResult(
automation_account_name=__ret__.get('automationAccountName'),
description=__ret__.get('description'),
encrypted=__ret__.get('encrypted'),
id=__ret__.get('id'),
name=__ret__.get('name'),
resource_group_name=__ret__.get('resourceGroupName'),
value=__ret__.get('value'))
| 39.100917
| 149
| 0.685594
|
795324cb64549f94a2e1e72de3f5747dcd82d838
| 3,049
|
py
|
Python
|
octoprint.py
|
gaunab/octopi_gui
|
1cc1d71d3b1acf0c0041c71a005de3dbd7667def
|
[
"BSD-3-Clause"
] | null | null | null |
octoprint.py
|
gaunab/octopi_gui
|
1cc1d71d3b1acf0c0041c71a005de3dbd7667def
|
[
"BSD-3-Clause"
] | null | null | null |
octoprint.py
|
gaunab/octopi_gui
|
1cc1d71d3b1acf0c0041c71a005de3dbd7667def
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
# http://isbullsh.it/2012/06/Rest-api-in-python/#conclusion
import requests, json
import os.path
class Api():
def __init__(self,url="localhost",api=None,apikeyfile=None):
""" Create new Connection-Object to octoprint
Params: - url (string)
Url of octoprint Server
- api (string)
Apikey - if no apikey is given, apikeyfile is searched for
- apikeyfile
"""
self.url = url
        if api is None:
            if apikeyfile is None:
try:
self.api = self.apikey()
except:
self.api = ""
else:
self.api = self.apikey(apikeyfile)
else:
self.api = api
        self.headers = {'apikey': self.api, 'Content-Type': 'application/json'}
def apikey(self,filename='apikey'):
""" Fetch APIKEY from File. If no Filename is given, ./apikey is used """
f = open(filename)
line = f.readline()
f.close()
return line.strip()
def version(self):
""" Return Version of Server """
r = requests.get("http://%s/api/version" %(self.url), headers=self.headers)
if r.status_code == 200:
return True, r.content
else:
return False, {}
def job(self):
r = requests.get("http://%s/api/job" %(self.url), headers=self.headers)
if r.status_code == 200:
job = json.loads(r.content.decode())
return True, job
else:
return False, {}
def progress(self):
""" Return Progress """
r = requests.get("http://%s/api/job" %(self.url), headers=self.headers)
if r.status_code == 200:
job = json.loads(r.content.decode())
return True, job['progress']
else:
return False, {}
def bedtemp(self):
r = requests.get("http://%s/api/printer/bed" %(self.url), headers=self.headers)
if r.status_code == 200:
return True,json.loads(r.content.decode())
else:
return False, {}
def tooltemp(self):
r = requests.get("http://%s/api/printer/tool" %(self.url), headers=self.headers)
if r.status_code == 200:
return True, json.loads(r.content.decode())
else:
return False, {}
def stop(self):
""" Stop Current Job """
payload = {'command':'cancel'}
r = requests.post("http://%s/api/job" %(self.url), headers=self.headers,
data=json.dumps(payload))
if r.status_code == 204:
return True
else:
return False
def start(self):
""" Start Current Job """
        payload = {'command': 'start'}
r = requests.post("http://%s/api/job" %(self.url), headers=self.headers,
data=json.dumps(payload))
if r.status_code == 204:
return True
else:
return False
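# Minimal usage sketch (editorial addition, not part of the original module).
# The host name and the "apikey" file below are placeholders; the file is read
# by Api.apikey() as described above.
if __name__ == "__main__":
    printer = Api(url="octopi.local", apikeyfile="apikey")
    ok, version = printer.version()
    if ok:
        print("Version info:", version)
    ok, progress = printer.progress()
    if ok:
        print("Progress:", progress)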
| 30.79798
| 88
| 0.516563
|
795325797bd22c251135eb538383fbd4bf408068
| 471
|
py
|
Python
|
bungieapi/generated/components/schemas/destiny/definitions/animations.py
|
itemmanager/bungieapi
|
0c4326f88ea0f28a1dcab683dc08c8d21c940fc1
|
[
"MIT"
] | 5
|
2022-01-06T21:05:53.000Z
|
2022-02-12T19:58:11.000Z
|
bungieapi/generated/components/schemas/destiny/definitions/animations.py
|
itemmanager/bungieapi
|
0c4326f88ea0f28a1dcab683dc08c8d21c940fc1
|
[
"MIT"
] | 8
|
2021-12-25T02:40:56.000Z
|
2022-03-28T03:31:41.000Z
|
bungieapi/generated/components/schemas/destiny/definitions/animations.py
|
itemmanager/bungieapi
|
0c4326f88ea0f28a1dcab683dc08c8d21c940fc1
|
[
"MIT"
] | 1
|
2022-01-30T23:53:25.000Z
|
2022-01-30T23:53:25.000Z
|
# generated by update - do not change manually
import dataclasses as dt
import typing as t
from bungieapi.json import to_json
@dt.dataclass(frozen=True)
class DestinyAnimationReference:
anim_identifier: str
anim_name: str
path: str
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"animName": to_json(self.anim_name),
"animIdentifier": to_json(self.anim_identifier),
"path": to_json(self.path),
}
| 23.55
| 60
| 0.66242
|
795325a601defaa8695328190f3fe5332f3ccff6
| 841
|
py
|
Python
|
Standard Library/socket/Sentdex/Three/client.py
|
shubhamnag14/Python-Documents
|
d3fee0ad90232b413f6ac1b562588fb255b79e42
|
[
"Apache-2.0"
] | 2
|
2020-11-27T13:21:05.000Z
|
2021-04-19T21:14:21.000Z
|
Standard Library/socket/Sentdex/Three/client.py
|
shubhamnag14/Python-Documents
|
d3fee0ad90232b413f6ac1b562588fb255b79e42
|
[
"Apache-2.0"
] | null | null | null |
Standard Library/socket/Sentdex/Three/client.py
|
shubhamnag14/Python-Documents
|
d3fee0ad90232b413f6ac1b562588fb255b79e42
|
[
"Apache-2.0"
] | 1
|
2021-06-27T20:31:42.000Z
|
2021-06-27T20:31:42.000Z
|
import socket
import pickle
HEADERSIZE = 10
# https://pythonprogramming.net/pickle-objects-sockets-tutorial-python-3/
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 1243))
while True:
    full_msg = b''
new_msg = True
while True:
msg = s.recv(16)
if new_msg:
print("new msg len:", msg[:HEADERSIZE])
msglen = int(msg[:HEADERSIZE])
new_msg = False
print(f"full message length: {msglen}")
        full_msg += msg
print(len(full_msg))
if len(full_msg)-HEADERSIZE == msglen:
print("full msg recvd")
print(full_msg[HEADERSIZE:])
d = pickle.loads(full_msg[HEADERSIZE:])
print(d)
new_msg = True
full_msg = b''
print(full_msg)
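# Sketch of the matching sender-side framing (editorial addition, mirroring the
# tutorial linked above; port and payload are illustrative):
#
#     import socket, pickle
#     HEADERSIZE = 10
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.bind((socket.gethostname(), 1243))
#     s.listen(5)
#     while True:
#         clientsocket, address = s.accept()
#         msg = pickle.dumps({1: "hi", 2: "there"})
#         msg = bytes(f"{len(msg):<{HEADERSIZE}}", "utf-8") + msg
#         clientsocket.send(msg)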
| 24.028571
| 73
| 0.581451
|
795326e519f2810aecf2b35a27faf6d6de5bf454
| 13,539
|
py
|
Python
|
pycity_base/classes/demand/zone_parameters.py
|
RWTH-EBC/pyCity
|
88c832aa647ceb8889abd8f851b7349c3366e30a
|
[
"MIT"
] | 11
|
2019-08-13T15:18:04.000Z
|
2022-01-19T08:03:02.000Z
|
pycity_base/classes/demand/zone_parameters.py
|
RWTH-EBC/pyCity
|
88c832aa647ceb8889abd8f851b7349c3366e30a
|
[
"MIT"
] | 29
|
2019-06-15T20:51:16.000Z
|
2020-08-27T16:04:30.000Z
|
pycity_base/classes/demand/zone_parameters.py
|
RWTH-EBC/pyCity
|
88c832aa647ceb8889abd8f851b7349c3366e30a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 21:22:29 2015
@author: tsz
Inputs:
A_f (section 6.4) - used area in m^2
"""
from __future__ import division
import numpy as np
import math
class ZoneParameters(object):
"""
This class holds all relevant parameters of a single zone as described in
DIN EN ISO 13790:2008 (German version of ISO 13790:2008).
This class also provides a function to compute the resistance
corresponding to ventilation.
"""
def __init__(self,
A_f=0,
A_w=[],
U_w=[],
g_gln=[],
epsilon_w=[],
R_se_w=0.04,
A_op=[],
U_op=[],
alpha_Sc=[],
R_se_op=0.04,
epsilon_op=0,
V=0,
sampling_rate=3600,
building_class=0,
kappa_j=[],
A_j=[],
simplified_capacity=True,
albedo=0.2,
gamma=[0, 90, 180, 270, 0, 0],
beta=[90, 90, 90, 90, 0, 0]):
"""
Set up a thermal zone as required for calculations according to
DIN EN ISO 13790:2008 (German version of ISO 13790:2008).
Simplifications
---------------
Glazings : Windows, doors
DIN EN ISO 13790, section 11.3.2, equation 43, page 67. Neglecting
the shading factor (F_sh,ob,k=1).
DIN EN ISO 13790, section 11.3.3, equation 44, page 67. Neglecting
the shading factor (F_sh,gl=1). Neglecting borders (F_F=0).
Consequently, A_w,p is assumed to be A_w (see parameters). In this
manner, the number of parameters is largely reduced.
DIN EN ISO 13790, section 11.4.2, equation 47, page 70. Usage of
the simplified, averaged solar energy transmittance (reduction
of required parameters).
Opaque components : Walls, roofs
DIN EN ISO 13790, section 11.3.4, equation 45, page 68. U_c and A_c
are assumed to be equal to U_op and W_op (see parameters).
Index-convention for parameters
-------------------------------
- 0 : South
- 1 : West
- 2 : North
- 3 : East
- 4 : Roof / Ceiling
- 5 : Floor
This convention is derived from the definition of the surface azimuth
angles. See Duffie and Beckman: Solar Engineering of Thermal Processes
(4th ed.), section 1.6, page 13. If a surface for example does not
contain any windows, enter 0 in this entry.
Parameters
----------
A_f : float
Used floor area in m^2 (cf. DIN EN ISO 13790, section 6.4, page 30)
A_w : array-like
Surface area of each window-like component (window, door,...) in m^2
U_w : array-like
U-values for each window-like component (window, door,...) in W/m^2K
g_gln : float or array-like
Energy transmittance of window-like components (without unit).
See DIN EN ISO 13790, section 11.4.2, page 70.
The fifth entry (floor) is typically 0, since the sun does not
directly affect the floor.
epsilon_w : float or array-like
Emissivity of window-like components.
See DIN EN ISO 13790, section 11.3.4, page 73, equation 51
R_se_w : float or array-like
Surface thermal resistance of window-like components.
See DIN EN ISO 13790, section 11.3.4, page 68 or ISO 6946
A_op : array-like
Surface area of each opaque component (walls, roof) in m^2
U_op : array-like
U-values for each opaque component (walls, roof) in W/m^2K
alpha_Sc : array-like
Attenuation coefficient for each opaque component, without unit.
The fifth entry (floor) is typically 0, since the sun does not
directly affect the floor.
R_se_op : float or array-like
Surface thermal resistance of opaque components.
See DIN EN ISO 13790, section 11.3.4, page 68 or ISO 6946
epsilon_op : float or array-like
Emissivity of opaque components.
See DIN EN ISO 13790, section 11.3.4, page 73, equation 51
V : float
Zone's volume in m3
sampling_rate : integer, optional
Sampling rate required for the computation and converting the
ventilation profile
building_class : integer, optional
- 0: very light
- 1: light
- 2: medium
- 3: heavy
- 4: very heavy
Optional. Only used if ``simplified_capacity==True``
kappa_j : array-like, optional
Heat capacity of each component that is in contact with the indoor
air in J/m^2K. Optional. Only used if ``simplified_capacity==False``
A_j : array-like, optional
Surface area of each component that is in contact with the indoor
air in m^2. Optional. Only used if ``simplified_capacity==False``
simplified_capacity : boolean, optional
- ``True``: Simplified computation of effective area and capacity
- ``False``: Detailed computation of effective area and capacity
albedo : float, optional
Average reflectivity of the ground.
Typical values are between 0.2 and 0.3.
gamma : array-like, optional
Surface azimuth angle, according to the index convention.
0 represents Southern orientation and 90 Western orientation.
beta : array-like, optional
Slope angle. 0 stands for horizontal surfaces and 90 for vertical.
"""
self._kind = "zoneparameters"
        # Note: We are not consistently using CamelCase in this function,
        # because many variables introduced in ISO 13790 have short indices,
        # and keeping the standard's notation is easier to read here.
# Note: If not stated differently, all equations, pages and sections
# refer to DIN EN ISO 13790:2008 (the official German version of
# ISO 13790:2008).
# Save sampling rate
self.sampling_rate = sampling_rate
# Compute A_t and H_tr_is
# Equation 9, section 7.2.2.2 (pages 35, 36)
        h_is = 3.45  # W/m^2K, section 7.2.2.2, page 35
lambda_at = 4.5 # section 7.2.2.2, page 36
self.A_t = A_f * lambda_at # m^2
self.H_tr_is = self.A_t * h_is # W/K
# Compute C_m and A_m
# Equations 65 and 66, resp. table 12 if the simplified method is used
# Pages 79-81, sections 12.2.2 and 12.3.1
if simplified_capacity:
# Table 12, section 12.3.1.2, page 81
if building_class == 0:
self.A_m = 2.5 * A_f
self.C_m = 80000 * A_f
elif building_class == 1:
self.A_m = 2.5 * A_f
                self.C_m = 110000 * A_f
elif building_class == 2:
self.A_m = 2.5 * A_f
self.C_m = 165000 * A_f
elif building_class == 3:
self.A_m = 3 * A_f
self.C_m = 260000 * A_f
else:
self.A_m = 3.5 * A_f
self.C_m = 370000 * A_f
else:
# Equations 65 and 66, sections 12.2.2 and 12.3.1.1., pages 79, 80
kappa_j = np.array(kappa_j)
A_j = np.array(A_j)
self.C_m = np.sum(kappa_j * A_j)
self.A_m = math.pow(self.C_m, 2) / np.sum(A_j * np.power(kappa_j, 2))
# Compute heat transmission through windows (also doors, glazed walls,
# curtain walls...)
# DIN EN ISO 13790:2008, equation 18, section 8.3.1, page 44
# Point- and line-based heat transfer is neglected to simplify the
# parametrization (l_k, Psi_k, chi_j = 0)
# As only one thermal zone is considered, b_tr_x = 1
self.A_windows = np.array(A_w)
self.U_windows = np.array(U_w)
self.H_tr_w = np.sum(self.A_windows * self.U_windows)
# Save effective area and radiative heat losses to the sky of window-
# like components.
# DIN EN ISO 13790:2008, section 11.4.2, equation 47
F_w = 0.9 # without unit
g_gln = np.array(g_gln)
self.g_gl = F_w * g_gln
# DIN EN ISO 13790:2008, section 11.3.2, equation 44
F_sh_gl = 1.0
F_F = 0
self.A_windows_sol = F_sh_gl * self.g_gl * (1 - F_F) * self.A_windows
# Simplification of external radiative heat exchange coefficient
# Slightly below DIN EN ISO 13790, section 11.4.6, equation 51, page 73
epsilon_w = np.array(epsilon_w)
h_r_windows = 5 * epsilon_w
# Simplification of average difference between ambient air temperature
# and sky's temperature.
# Slightly below DIN EN ISO 13790, section 11.4.6, equation 51, page 73
Delta_theta_er = 11 # K
# DIN EN ISO 13790, section 11.3.5, equation 46, page 69
R_se_op = np.array(R_se_op)
self.Psi_r_windows = (R_se_w * self.U_windows * self.A_windows * h_r_windows * Delta_theta_er)
# H_tr_ms, based on equation 64, section 12.2.2, page 79
h_ms = 9.1 # W/m^2K, section 12.2.2, page 79
self.H_tr_ms = h_ms * self.A_m # W/K
# Compute heat transmission through opaque parts (walls, roof)
# Compute total heat transmission through opaque parts
# Same source as for heat transmissions through windows and same
# simplifications (no 0-dimensional and 1-dimensional heat transfer)
self.A_opaque = np.array(A_op)
self.U_opaque = np.array(U_op)
if len(self.U_opaque.shape) > 1:
self.H_tr_op = np.zeros(self.U_opaque.shape[0])
self.H_tr_em = np.zeros(self.U_opaque.shape[0])
for i in range(len(self.H_tr_op)):
self.H_tr_op[i] = np.sum(self.A_opaque * self.U_opaque[i, :])
# H_tr_em, based on equation 63, section 12.2.2, page 79
self.H_tr_em[i] = 1 / (1 / self.H_tr_op[i] - 1 / self.H_tr_ms)
else:
self.H_tr_op = np.sum(self.A_opaque * self.U_opaque)
self.H_tr_em = 1 / (1 / self.H_tr_op - 1 / self.H_tr_ms)
# Save effective area and radiative heat losses to the sky of opaque
# components.
# DIN EN ISO 13790:2008, section 11.4.2, equation 45, page 68
alpha_Sc = np.array(alpha_Sc)
R_se_op = np.array(R_se_op)
# Simplification of external radiative heat exchange coefficient
# Slightly below DIN EN ISO 13790, section 11.4.6, equation 51, page 73
epsilon_op = np.array(epsilon_op)
h_r_opaque = 5 * epsilon_op
if len(self.U_opaque.shape) > 1:
self.A_opaque_sol = alpha_Sc * R_se_op * self.U_opaque[0] * self.A_opaque
# DIN EN ISO 13790, section 11.3.5, equation 46, page 69
self.Psi_r_opaque = (R_se_op * self.U_opaque[0] * self.A_opaque
* h_r_opaque * Delta_theta_er)
else:
self.A_opaque_sol = alpha_Sc * R_se_op * self.U_opaque * self.A_opaque
# DIN EN ISO 13790, section 11.3.5, equation 46, page 69
self.Psi_r_opaque = (R_se_op * self.U_opaque * self.A_opaque * h_r_opaque * Delta_theta_er)
# H_tr_em, based on equation 63, section 12.2.2, page 79
# self.H_tr_em = 1 / (1 / self.H_tr_op - 1 / self.H_tr_ms)
# Save zone's volume for later computing the ventilation
# As ventilation is a dynamic effect, a scalar heat transfer
# coefficient is insufficient. Instead a vector/array will be computed
self.V = V
# Initialize ventilation
ventilationRate = np.zeros(int(8760 / 3600 * sampling_rate))
self.updateVentilation(ventilationRate)
# Save Albedo
self.albedo = albedo
# Save beta and gamma
self.beta = beta
self.gamma = gamma
# Compute interaction between outside surfaces (indexes 0-4) and sky
# 0.5 describes vertical walls and 1 horizontal roofs
# (see DIN EN ISO 13790:2008, section 11.4.6, page 73)
self.F_r = [0.5 if beta[i] > 0 else 1 for i in range(5)]
@property
def kind(self):
return self._kind
def updateVentilation(self,
ventilationRate,
ventilationRateMinimum=0.5):
"""
Compute the heat transfer due to ventilation for the given
ventilationRate.
ventilationRate : array-like
Infiltration due to window opening, etc.
ventilationRateMinimum : float, optional
Minimum air exchange rate.
"""
ventilationRate = np.maximum(ventilationRate, ventilationRateMinimum)
rhoAir = 1.2 # kg/m^3
cPAir = 1000 # J/kgK
        # Conforms to DIN EN ISO 13789:2008-04, page 11, section 5, equation 4.
self.H_ve = (rhoAir * cPAir * ventilationRate *
self.V / self.sampling_rate)
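# Minimal usage sketch (editorial addition, not part of the original module).
# The geometry below is made up purely to illustrate the 6-entry orientation
# convention (S, W, N, E, roof, floor) described in the docstring.
if __name__ == "__main__":
    zone = ZoneParameters(
        A_f=150.0,
        A_w=[4.0, 2.0, 4.0, 2.0, 0.0, 0.0],
        U_w=[1.3] * 6,
        g_gln=[0.6] * 6,
        epsilon_w=[0.9] * 6,
        A_op=[40.0, 30.0, 40.0, 30.0, 150.0, 150.0],
        U_op=[0.3] * 6,
        alpha_Sc=[0.6] * 6,
        epsilon_op=[0.9] * 6,
        V=450.0,
        building_class=2)
    print("H_tr_w [W/K]:", zone.H_tr_w)
    print("H_tr_op [W/K]:", zone.H_tr_op)
    print("C_m [J/K]:", zone.C_m)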
| 42.442006
| 103
| 0.572273
|
795329942ee80322beabc19262a95f119509000d
| 543
|
py
|
Python
|
player/migrations/0004_song_account.py
|
jacebrowning/virtualboombox
|
a4f0b2647b9f12e7e7ec4a3a245074225a35808d
|
[
"MIT"
] | 4
|
2020-01-03T06:55:18.000Z
|
2021-03-23T18:40:39.000Z
|
player/migrations/0004_song_account.py
|
jacebrowning/virtualboombox
|
a4f0b2647b9f12e7e7ec4a3a245074225a35808d
|
[
"MIT"
] | 3
|
2019-12-30T21:21:47.000Z
|
2021-06-10T19:49:40.000Z
|
player/migrations/0004_song_account.py
|
jacebrowning/virtualboombox
|
a4f0b2647b9f12e7e7ec4a3a245074225a35808d
|
[
"MIT"
] | 1
|
2021-03-23T18:40:55.000Z
|
2021-03-23T18:40:55.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-29 23:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('player', '0003_auto_20170129_1726'),
]
operations = [
migrations.AddField(
model_name='song',
name='account',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='player.Account'),
),
]
| 24.681818
| 113
| 0.64825
|
79532a623fb15c766120b09ce43676730523f843
| 1,306
|
py
|
Python
|
sleepypuppy/admin/assessment/models.py
|
hexlism/css_platform
|
0c80cb314e7e3ecf73de2feec5349c04c0dd581b
|
[
"Apache-2.0"
] | 4
|
2016-07-28T08:07:40.000Z
|
2019-08-25T16:48:05.000Z
|
sleepypuppy/admin/assessment/models.py
|
hexlism/css_platform
|
0c80cb314e7e3ecf73de2feec5349c04c0dd581b
|
[
"Apache-2.0"
] | null | null | null |
sleepypuppy/admin/assessment/models.py
|
hexlism/css_platform
|
0c80cb314e7e3ecf73de2feec5349c04c0dd581b
|
[
"Apache-2.0"
] | 3
|
2016-07-29T06:16:49.000Z
|
2019-08-25T16:43:43.000Z
|
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sleepypuppy import db
class Assessment(db.Model):
"""
    Assessment model contains the following parameters:
    name = name of the assessment you are working on.
    payloads = payloads associated with the assessment
"""
__tablename__ = 'assessments'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(500))
snooze = db.Column(db.Boolean)
run_once = db.Column(db.Boolean)
access_log_enabled = db.Column(db.Boolean)
def as_dict(self):
"""Return Assessment model as JSON object"""
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return str(self.name)
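# (Editorial illustration, values are made up) Assessment.as_dict() returns the
# column values of a row, e.g.:
#     {'id': 1, 'name': 'example assessment', 'snooze': False,
#      'run_once': False, 'access_log_enabled': True}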
| 34.368421
| 78
| 0.692956
|
79532b91149b17e53a54153552f70e7f3aeceace
| 51,814
|
py
|
Python
|
Lib/httplib.py
|
karkranikhil/python
|
a96b80dfda19ed4cdcc35b087fb8474757e34a93
|
[
"bzip2-1.0.6"
] | 4
|
2020-05-18T22:35:54.000Z
|
2021-08-24T02:50:38.000Z
|
Lib/httplib.py
|
karkranikhil/python
|
a96b80dfda19ed4cdcc35b087fb8474757e34a93
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/httplib.py
|
karkranikhil/python
|
a96b80dfda19ed4cdcc35b087fb8474757e34a93
|
[
"bzip2-1.0.6"
] | 4
|
2020-05-18T02:51:32.000Z
|
2022-02-25T21:54:30.000Z
|
r"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
from array import array
import os
import re
import socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
# maximum amount of headers accepted
_MAXHEADERS = 100
# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
#
# VCHAR = %x21-7E
# obs-text = %x80-FF
# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
#
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
#
# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
# the patterns for both name and value are more lenient than RFC
# definitions to allow for backwards compatibility
_is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
_is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
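# For example, a value like "x\r\nSet-Cookie: y" is rejected by
# _is_illegal_header_value (header injection), while "x\r\n continued"
# (obsolete line folding) is not.  (Editorial illustration.)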
# These characters are not allowed within HTTP URL paths.
# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
# Prevents CVE-2019-9740. Includes control characters such as \r\n.
# Restrict non-ASCII characters above \x7f (0x80-0xff).
_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f-\xff]')
# Arguably only these _should_ be allowed:
# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
# We are more lenient for assumed real world compatibility purposes.
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
class HTTPMessage(mimetools.Message):
def addheader(self, key, value):
"""Add header for field key handling repeats."""
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = ", ".join((prev, value))
self.dict[key] = combined
def addcontinue(self, key, more):
"""Add more field data from a continuation line."""
prev = self.dict[key]
self.dict[key] = prev + "\n " + more
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If an invalid line is found in the
header section, it is skipped, and further lines are processed.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
tell = None
if not hasattr(self.fp, 'unread') and self.seekable:
tell = self.fp.tell
while True:
if len(hlist) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if tell:
try:
tell()
except IOError:
tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
elif headerseen is not None:
# An empty header name. These aren't allowed in HTTP, but it's
# probably a benign mistake. Don't add the header, just keep
# going.
pass
else:
# It's not a header line; skip it and try the next line.
self.status = 'Non-header line where header expected'
class HTTPResponse:
# strict: If true, raise BadStatusLine if the status line can't be
# parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
# false because it prevents clients from talking to HTTP/0.9
# servers. Note that a response with a sufficiently corrupted
# status line will look like an HTTP/0.9 response.
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False):
if buffering:
# The caller won't be using any sock.recv() calls, so buffering
# is fine and recommended for performance.
self.fp = sock.makefile('rb')
else:
# The buffer size is specified as zero, because the headers of
# the response are read with readline(). If the reads were
# buffered the readline() calls could consume some of the
            # response, which may be read via a recv() on the underlying
# socket.
self.fp = sock.makefile('rb', 0)
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
# Initialize with Simple-Response defaults
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if self.debuglevel > 0:
print "reply:", repr(line)
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine("No status line received - the server has closed the connection")
try:
[version, status, reason] = line.split(None, 2)
except ValueError:
try:
[version, status] = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail and status
# will be treated as 0.9 response.
version = ""
if not version.startswith('HTTP/'):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
# assume it's a Simple-Response from an 0.9 server
self.fp = LineAndFileWrapper(line, self.fp)
return "HTTP/0.9", 200, ""
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print "header:", skip
self.status = status
self.reason = reason.strip()
if version == 'HTTP/1.0':
self.version = 10
elif version.startswith('HTTP/1.'):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == 'HTTP/0.9':
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = HTTPMessage(StringIO())
return
self.msg = HTTPMessage(self.fp, 0)
if self.debuglevel > 0:
for hdr in self.msg.headers:
print "header:", hdr,
# don't let the msg keep an fp
self.msg.fp = None
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.getheader('transfer-encoding')
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
length = self.msg.getheader('content-length')
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == 'HEAD'):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if not self.will_close and \
not self.chunked and \
self.length is None:
self.will_close = 1
def _check_close(self):
conn = self.msg.getheader('connection')
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.msg.getheader('connection')
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.getheader('keep-alive'):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.msg.getheader('proxy-connection')
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def close(self):
fp = self.fp
if fp:
self.fp = None
fp.close()
def isclosed(self):
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
# XXX It would be nice to have readline and __iter__ for this, too.
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if self.chunked:
return self._read_chunked(amt)
if amt is None:
# unbounded read
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self.close()
raise
self.length = 0
self.close() # we read everything
return s
if self.length is not None:
if amt > self.length:
# clip the read to the "end of response"
amt = self.length
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
s = self.fp.read(amt)
if not s and amt:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self.close()
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
return s
def _read_chunked(self, amt):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise IncompleteRead(''.join(value))
if chunk_left == 0:
break
if amt is None:
value.append(self._safe_read(chunk_left))
elif amt < chunk_left:
value.append(self._safe_read(amt))
self.chunk_left = chunk_left - amt
return ''.join(value)
elif amt == chunk_left:
value.append(self._safe_read(amt))
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return ''.join(value)
else:
value.append(self._safe_read(chunk_left))
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return ''.join(value)
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
# NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never
# return less than x bytes unless EOF is encountered. It now handles
# signal interruptions (socket.error EINTR) internally. This code
# never caught that exception anyways. It seems largely pointless.
# self.fp.read(amt) will work fine.
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return ''.join(s)
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
if strict is not None:
self.strict = strict
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unittests
# to replace with a suitable mock
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
""" Set up host and port for HTTP CONNECT tunnelling.
        In a connection that uses HTTP CONNECT tunnelling, the host passed to the
        constructor is used as a proxy server that relays all communication to the
        endpoint passed to set_tunnel. This is done by sending an HTTP CONNECT
        request to the proxy server when the connection is established.
This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
# Verify if this is required.
if self.sock:
raise RuntimeError("Can't setup tunnel for established connection.")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
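    # Hedged usage sketch for set_tunnel (the proxy and origin host names are
    # illustrative, not part of this module):
    #
    #   conn = HTTPConnection("proxy.example.com", 3128)
    #   conn.set_tunnel("www.example.com", 80)
    #   conn.request("GET", "/")      # the CONNECT handshake is sent first, then the GET
    #   resp = conn.getresponse()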
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
self._tunnel_port))
for header, value in self._tunnel_headers.iteritems():
self.send("%s: %s\r\n" % (header, value))
self.send("\r\n")
response = self.response_class(self.sock, strict = self.strict,
method = self._method)
(version, code, message) = response._read_status()
if version == "HTTP/0.9":
# HTTP/0.9 doesn't support the CONNECT verb, so if httplib has
# concluded HTTP/0.9 is being used something has gone wrong.
self.close()
raise socket.error("Invalid response from tunnel request")
if code != 200:
self.close()
raise socket.error("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending trailer
break
if line == '\r\n':
break
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection((self.host,self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
self.__state = _CS_IDLE
try:
sock = self.sock
if sock:
self.sock = None
sock.close() # close it manually... there may be other refs
finally:
response = self.__response
if response:
self.__response = None
response.close()
def send(self, data):
"""Send `data' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print "send:", repr(data)
blocksize = 8192
if hasattr(data,'read') and not isinstance(data, array):
if self.debuglevel > 0: print "sendIng a read()able"
datablock = data.read(blocksize)
while datablock:
self.sock.sendall(datablock)
datablock = data.read(blocksize)
else:
self.sock.sendall(data)
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend(("", ""))
msg = "\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, str):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
#message_body was not a string (i.e. it is a file) and
#we must run the risk of Nagle
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not automatically add a 'Host:' header
        `skip_accept_encoding' if True does not automatically add an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest()
# Save the method for use later in the response phase
self._method = method
url = url or '/'
self._validate_path(url)
request = '%s %s %s' % (method, url, self._http_vsn_str)
self._output(self._encode_request(request))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
# Wrap the IPv6 Host Header with [] (RFC 2732)
if host_enc.find(':') >= 0:
host_enc = "[" + host_enc + "]"
if port == self.default_port:
self.putheader('Host', host_enc)
else:
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def _encode_request(self, request):
# On Python 2, request is already encoded (default)
return request
def _validate_path(self, url):
"""Validate a url for putrequest."""
# Prevent CVE-2019-9740.
match = _contains_disallowed_url_pchar_re.search(url)
if match:
msg = (
"URL can't contain control characters. {url!r} "
"(found at least {matched!r})"
).format(matched=match.group(), url=url)
raise InvalidURL(msg)
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
header = '%s' % header
if not _is_legal_header_name(header):
raise ValueError('Invalid header name %r' % (header,))
values = [str(v) for v in values]
for one_value in values:
if _is_illegal_header_value(one_value):
raise ValueError('Invalid header value %r' % (one_value,))
hdr = '%s: %s' % (header, '\r\n\t'.join(values))
self._output(hdr)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass a message body
associated with the request. The message body will be sent in
        the same packet as the message headers if it is a string, otherwise it is
sent as a separate packet.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body, method):
# Set the content-length based on the body. If the body is "empty", we
# set Content-Length: 0 for methods that expect a body (RFC 7230,
# Section 3.3.2). If the body is set for other methods, we set the
# header provided we can figure out what the length is.
thelen = None
if body is None and method.upper() in _METHODS_EXPECTING_BODY:
thelen = '0'
elif body is not None:
try:
thelen = str(len(body))
except (TypeError, AttributeError):
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print "Cannot stat!!"
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if 'content-length' not in header_names:
self._set_content_length(body, method)
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
self.endheaders(body)
def getresponse(self, buffering=False):
"Get the response from the server."
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
#
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady()
args = (self.sock,)
kwds = {"strict":self.strict, "method":self._method}
if self.debuglevel > 0:
args += (self.debuglevel,)
if buffering:
#only add this keyword if non-default, for compatibility with
#other response_classes.
kwds["buffering"] = True;
response = self.response_class(*args, **kwds)
try:
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
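# Illustrative sketch only (not part of the original module): a minimal
# request/response round trip through the HTTPConnection and HTTPResponse
# classes defined above.  The default host name is an assumption for
# demonstration purposes.
def _example_request(host="www.example.com", path="/"):
    conn = HTTPConnection(host, timeout=10)
    conn.request("GET", path)          # putrequest + headers + endheaders
    resp = conn.getresponse()          # HTTPResponse; begin() parses the status line
    body = resp.read()                 # honors Content-Length / chunked encoding
    conn.close()
    return resp.status, resp.reason, len(body)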
class HTTP:
"Compatibility class with httplib.py from 1.5."
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will raise
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.putheader = conn.putheader
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
if host is not None:
(self._conn.host, self._conn.port) = self._conn._get_hostport(host, port)
self._conn.connect()
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
try:
if not buffering:
response = self._conn.getresponse()
else:
#only add this keyword if non-default for compatibility
#with other connection classes
response = self._conn.getresponse(buffering)
except BadStatusLine, e:
### hmm. if getresponse() ever closes the socket on a bad request,
### then we are going to have problems with self.sock
### should we keep this behavior? do people use it?
# keep the socket open (as a file), and return it
self.file = self._conn.sock.makefile('rb', 0)
# close our socket -- we want to restart after any protocol error
self.close()
self.headers = None
return -1, e.line, None
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, context=None):
HTTPConnection.__init__(self, host, port, strict, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
def connect(self):
"Connect to a host on a given (SSL) port."
HTTPConnection.connect(self)
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
__all__.append("HTTPSConnection")
class HTTPS(HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None, context=None):
# provide a default host, pass the X509 cert info
# urf. compensate for bad input.
if port == 0:
port = None
self._setup(self._connection_class(host, port, key_file,
cert_file, strict,
context=context))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
self.key_file = key_file
self.cert_file = cert_file
def FakeSocket (sock, sslobj):
warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
"Use the result of ssl.wrap_socket() directly instead.",
DeprecationWarning, stacklevel=2)
return sslobj
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# The status-line parsing code calls readline(), which normally
# get the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
| 35.537723
| 97
| 0.576813
|
79532c02497ec5f050ec2e5ec8803034d80fbfb0
| 4,297
|
py
|
Python
|
avx512-jpeg-zizag/16bit-array/sse_generate.py
|
clayne/toys
|
ec06411e2d3b920403607888d4a573e41390ee5b
|
[
"BSD-2-Clause"
] | 2
|
2019-01-06T05:32:18.000Z
|
2019-12-12T04:54:56.000Z
|
avx512-jpeg-zizag/16bit-array/sse_generate.py
|
clayne/toys
|
ec06411e2d3b920403607888d4a573e41390ee5b
|
[
"BSD-2-Clause"
] | null | null | null |
avx512-jpeg-zizag/16bit-array/sse_generate.py
|
clayne/toys
|
ec06411e2d3b920403607888d4a573e41390ee5b
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
order = [
[ 0, 1, 8, 16, 9, 2, 3, 10],
[17, 24, 32, 25, 18, 11, 4, 5],
[12, 19, 26, 33, 40, 48, 41, 34],
[27, 20, 13, 6, 7, 14, 21, 28],
[35, 42, 49, 56, 57, 50, 43, 36],
[29, 22, 15, 23, 30, 37, 44, 51],
[58, 59, 52, 45, 38, 31, 39, 46],
[53, 60, 61, 54, 47, 55, 62, 63],
]
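# `order` is the standard 8x8 JPEG zigzag scan: order[row] lists, for each of the
# eight 16-bit lanes of output row `row`, the linear index (0..63) of the source
# element.  Below, index / 8 selects one of the eight input SSE registers A..H and
# index % 8 selects the word within that register.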
source_reg_name = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
target_reg_name = ['row%d' % k for k in range(8)]
def main():
try:
idx = sys.argv.index('--copy-single')
del sys.argv[idx]
copy_single_item = True
except ValueError:
copy_single_item = False
lines = generate_code(copy_single_item)
indent = ' ' * 8
output = '\n'.join(indent + line for line in lines)
try:
file = open(sys.argv[1], 'wt')
except IndexError:
file = sys.stdout
file.write(output)
def generate_code(copy_single_item):
lines = []
for rowid, row in enumerate(order):
lines.append('')
lines.append('// row #%d' % rowid)
def get_tmp_name(register):
return '%s_%d' % (source_reg_name[register], rowid)
indices, used_registers = get_target_order(row)
target_name = target_reg_name[rowid]
# 1. generate partial results for given row
copy_item = []
shuffle = []
for register in used_registers:
single_item = get_single_item_indices(register, indices)
if copy_single_item and single_item is not None:
target_index, source_index = single_item
copy_item.append((register, source_index, target_index))
else:
pshufb_input = get_pshufb_bytes(register, indices)
shuffle.append((register, pshufb_input))
# 2. generate C++ code
def generate_shuffle(register, pshufb_input):
register_name = source_reg_name[register]
tmp_name = get_tmp_name(register)
shuf_name = '%s_shuf' % tmp_name
pshufb_fmt = ', '.join(map(str, pshufb_input))
lines.append('const __m128i %s = _mm_setr_epi8(%s);' % (shuf_name, pshufb_fmt))
lines.append('const __m128i %s = _mm_shuffle_epi8(%s, %s);' % (tmp_name, register_name, shuf_name))
return tmp_name
def generate_copy_item(register, source_index, target_index):
register_name = source_reg_name[register]
extract = '_mm_extract_epi16(%s, %d)' % (register_name, source_index)
lines.append('%s = _mm_insert_epi16(%s, %s, %d);' % (target_name, target_name, extract, target_index))
assert len(shuffle) >= 2
        # 2a. initialize row register
t0 = generate_shuffle(*shuffle[0])
del shuffle[0]
t1 = generate_shuffle(*shuffle[0])
del shuffle[0]
lines.append('__m128i %s = _mm_or_si128(%s, %s);' % (target_name, t0, t1))
# 2b. update row register with shuffled registers
for instr in shuffle:
tk = generate_shuffle(*instr)
lines.append('%s = _mm_or_si128(%s, %s);' % (target_name, target_name, tk))
# 2c. copy individual items
for instr in copy_item:
generate_copy_item(*instr)
return lines
def get_target_order(row):
indices = []
used_registers = set()
for index in row:
register = index / 8
word = index % 8
indices.append((register, word))
used_registers.add(register)
used_registers = list(sorted(used_registers))
return (indices, used_registers)
def get_pshufb_bytes(shuffled_register, indices):
res = []
for register, word in indices:
if register == shuffled_register:
res.append(word * 2 + 0)
res.append(word * 2 + 1)
else:
res.append(-1)
res.append(-1)
return res
def get_single_item_indices(source_register, indices):
res = None
for target_index, (register, source_index) in enumerate(indices):
if register != source_register:
continue
if res is None:
res = (target_index, source_index)
else:
return None
    return res
if __name__ == '__main__':
main()
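# Assumed invocation (writes the generated C++ intrinsics to the given file, or to
# stdout when no output path is supplied; the output file name is illustrative):
#   python sse_generate.py rows_sse.inl
#   python sse_generate.py --copy-single rows_sse.inl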
| 27.722581
| 114
| 0.572958
|
79532c8ab6a0523e82acb426077d2533ad58310a
| 4,024
|
py
|
Python
|
alipay/aop/api/domain/DeliveryInfo.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/DeliveryInfo.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/DeliveryInfo.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class DeliveryInfo(object):
def __init__(self):
self._city_code = None
self._consignee = None
self._contact_phone = None
self._detail_address = None
self._district_code = None
self._province_code = None
self._zip_code = None
@property
def city_code(self):
return self._city_code
@city_code.setter
def city_code(self, value):
self._city_code = value
@property
def consignee(self):
return self._consignee
@consignee.setter
def consignee(self, value):
self._consignee = value
@property
def contact_phone(self):
return self._contact_phone
@contact_phone.setter
def contact_phone(self, value):
self._contact_phone = value
@property
def detail_address(self):
return self._detail_address
@detail_address.setter
def detail_address(self, value):
self._detail_address = value
@property
def district_code(self):
return self._district_code
@district_code.setter
def district_code(self, value):
self._district_code = value
@property
def province_code(self):
return self._province_code
@province_code.setter
def province_code(self, value):
self._province_code = value
@property
def zip_code(self):
return self._zip_code
@zip_code.setter
def zip_code(self, value):
self._zip_code = value
def to_alipay_dict(self):
params = dict()
if self.city_code:
if hasattr(self.city_code, 'to_alipay_dict'):
params['city_code'] = self.city_code.to_alipay_dict()
else:
params['city_code'] = self.city_code
if self.consignee:
if hasattr(self.consignee, 'to_alipay_dict'):
params['consignee'] = self.consignee.to_alipay_dict()
else:
params['consignee'] = self.consignee
if self.contact_phone:
if hasattr(self.contact_phone, 'to_alipay_dict'):
params['contact_phone'] = self.contact_phone.to_alipay_dict()
else:
params['contact_phone'] = self.contact_phone
if self.detail_address:
if hasattr(self.detail_address, 'to_alipay_dict'):
params['detail_address'] = self.detail_address.to_alipay_dict()
else:
params['detail_address'] = self.detail_address
if self.district_code:
if hasattr(self.district_code, 'to_alipay_dict'):
params['district_code'] = self.district_code.to_alipay_dict()
else:
params['district_code'] = self.district_code
if self.province_code:
if hasattr(self.province_code, 'to_alipay_dict'):
params['province_code'] = self.province_code.to_alipay_dict()
else:
params['province_code'] = self.province_code
if self.zip_code:
if hasattr(self.zip_code, 'to_alipay_dict'):
params['zip_code'] = self.zip_code.to_alipay_dict()
else:
params['zip_code'] = self.zip_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DeliveryInfo()
if 'city_code' in d:
o.city_code = d['city_code']
if 'consignee' in d:
o.consignee = d['consignee']
if 'contact_phone' in d:
o.contact_phone = d['contact_phone']
if 'detail_address' in d:
o.detail_address = d['detail_address']
if 'district_code' in d:
o.district_code = d['district_code']
if 'province_code' in d:
o.province_code = d['province_code']
if 'zip_code' in d:
o.zip_code = d['zip_code']
return o
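# Minimal usage sketch (field values are illustrative, not real data):
#   info = DeliveryInfo()
#   info.consignee = 'Zhang San'
#   info.city_code = '310100'
#   params = info.to_alipay_dict()
#   same = DeliveryInfo.from_alipay_dict(params)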
| 30.717557
| 79
| 0.599404
|
79532dc7348c06e8a4fc8341682fe764848d78b5
| 3,111
|
py
|
Python
|
src/lsqr_spark.py
|
chocjy/randomized-LS-solvers
|
4c1c9211ee56a7344baebc6d36e33d72ccb620b9
|
[
"Apache-2.0"
] | 12
|
2015-04-14T08:03:08.000Z
|
2020-05-08T12:07:32.000Z
|
src/lsqr_spark.py
|
chocjy/randomized-LS-solvers
|
4c1c9211ee56a7344baebc6d36e33d72ccb620b9
|
[
"Apache-2.0"
] | null | null | null |
src/lsqr_spark.py
|
chocjy/randomized-LS-solvers
|
4c1c9211ee56a7344baebc6d36e33d72ccb620b9
|
[
"Apache-2.0"
] | 3
|
2015-04-14T08:02:50.000Z
|
2018-09-22T01:55:17.000Z
|
from math import sqrt, log
import numpy as np
from numpy.linalg import norm, lstsq
import time
import logging
logger = logging.getLogger(__name__)
def lsqr_spark( matrix_Ab, m, n, N, tol=1e-14, iter_lim=None):
"""
A simple version of LSQR on Spark
"""
x_iter = []
time_iter = []
t0 = time.time()
logger.info('In LSQR!')
    eps = 32 * np.finfo(float).eps  # slightly larger than machine epsilon
if tol < eps:
tol = eps
elif tol >= 1:
tol = 1-eps
max_n_stag = 3
u = matrix_Ab.get_b()
beta = norm(u)
u /= beta
v = np.dot( matrix_Ab.ltimes_vec(u), N ).squeeze() # v is an array
alpha = norm(v)
if alpha != 0:
v /= alpha
w = v.copy()
x = np.zeros(n)
phibar = beta
rhobar = alpha
nrm_a = 0.0
cnd_a = 0.0
sq_d = 0.0
nrm_r = beta
nrm_ar_0 = alpha*beta
if nrm_ar_0 == 0: # alpha == 0 || beta == 0
return x, 0, 0
nrm_x = 0
sq_x = 0
z = 0
cs2 = -1
sn2 = 0
stag = 0
flag = -1
if iter_lim is None:
iter_lim = np.max( [20, 2*np.min([m,n])] )
for itn in xrange(int(iter_lim)):
u = matrix_Ab.rtimes_vec(np.dot(N,v)).squeeze() - alpha*u
beta = norm(u)
u /= beta
nrm_a = sqrt(nrm_a**2 + alpha**2 + beta**2)
v = np.dot( matrix_Ab.ltimes_vec(u), N).squeeze() - beta*v
alpha = norm(v)
v /= alpha
rho = sqrt(rhobar**2+beta**2)
cs = rhobar/rho
sn = beta/rho
theta = sn*alpha
rhobar = -cs*alpha
phi = cs*phibar
phibar = sn*phibar
x = x + (phi/rho)*w
w = v-(theta/rho)*w
# estimate of norm(r)
nrm_r = phibar
# estimate of norm(A'*r)
nrm_ar = phibar*alpha*np.abs(cs)
# check convergence
if nrm_ar < tol*nrm_ar_0:
flag = 0
# break
if nrm_ar < eps*nrm_a*nrm_r:
flag = 0
# break
# estimate of cond(A)
sq_w = np.dot(w,w)
nrm_w = sqrt(sq_w)
sq_d += sq_w/(rho**2)
cnd_a = nrm_a*sqrt(sq_d)
# check condition number
if cnd_a > 1/eps:
flag = 1
# break
# check stagnation
if abs(phi/rho)*nrm_w < eps*nrm_x:
stag += 1
else:
stag = 0
if stag >= max_n_stag:
flag = 1
# break
# estimate of norm(x)
delta = sn2*rho
gambar = -cs2*rho
rhs = phi - delta*z
zbar = rhs/gambar
nrm_x = sqrt(sq_x + zbar**2)
gamma = sqrt(gambar**2 + theta**2)
cs2 = gambar/gamma
sn2 = theta /gamma
z = rhs /gamma
sq_x += z**2
x_iter.append(x)
time_iter.append( time.time() - t0 )
logger.info("Finished one iteration!")
y_iter = x_iter
x_iter = [np.dot(N,x) for x in x_iter]
return x_iter, y_iter, time_iter
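# Interface expected of `matrix_Ab` (inferred from the calls above; the class is
# defined elsewhere in this project):
#   matrix_Ab.get_b()        -> right-hand-side vector b
#   matrix_Ab.ltimes_vec(u)  -> u multiplied by A on the left (plays the role of A^T u)
#   matrix_Ab.rtimes_vec(v)  -> A v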
| 21.163265
| 70
| 0.472195
|
79532e44a6790c5e602fb7495a583ffe4db37a16
| 87
|
py
|
Python
|
pricePrediction/predict/__main__.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | null | null | null |
pricePrediction/predict/__main__.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | null | null | null |
pricePrediction/predict/__main__.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | 1
|
2022-03-02T16:21:16.000Z
|
2022-03-02T16:21:16.000Z
|
from pricePrediction.predict.predict import main
if __name__ == '__main__':
main()
| 21.75
| 48
| 0.747126
|
79532e9aa8ba7972b089a44884f9a7909d57f8ab
| 297
|
py
|
Python
|
utils/Serializable.py
|
Wanket/RnD-py
|
20ffe47adf96b32272304a9f6b1f1a98598f721f
|
[
"MIT"
] | null | null | null |
utils/Serializable.py
|
Wanket/RnD-py
|
20ffe47adf96b32272304a9f6b1f1a98598f721f
|
[
"MIT"
] | null | null | null |
utils/Serializable.py
|
Wanket/RnD-py
|
20ffe47adf96b32272304a9f6b1f1a98598f721f
|
[
"MIT"
] | null | null | null |
import json
from typing import TypeVar, Type
class Serializable:
def serialize(self) -> str:
return json.dumps(self.__dict__, default=lambda o: o.__dict__)
T = TypeVar("T")
@staticmethod
def deserialize(t: Type[T], data: str) -> T:
return t(**json.loads(data))
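# Usage sketch (the Point class is illustrative):
#   class Point(Serializable):
#       def __init__(self, x=0, y=0):
#           self.x = x
#           self.y = y
#   data = Point(1, 2).serialize()                # '{"x": 1, "y": 2}'
#   point = Serializable.deserialize(Point, data)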
| 21.214286
| 70
| 0.646465
|
79532f4f9d0763cf835e2952a5d6048db714160d
| 22
|
py
|
Python
|
project/todo/models/__init__.py
|
dagothar/django-todo
|
dd360e873b2a92cb542e623d6cc43bcd6bfe3570
|
[
"MIT"
] | null | null | null |
project/todo/models/__init__.py
|
dagothar/django-todo
|
dd360e873b2a92cb542e623d6cc43bcd6bfe3570
|
[
"MIT"
] | null | null | null |
project/todo/models/__init__.py
|
dagothar/django-todo
|
dd360e873b2a92cb542e623d6cc43bcd6bfe3570
|
[
"MIT"
] | null | null | null |
from task import Task
| 11
| 21
| 0.818182
|
79532f90a8f34a234db2fdd0ad903b7758866a66
| 7,884
|
py
|
Python
|
src/models/forecast/web_sarima.py
|
webclinic017/advisor_app
|
9cdab4aca19e193850943ef8308bad5c5ea0415d
|
[
"MIT"
] | null | null | null |
src/models/forecast/web_sarima.py
|
webclinic017/advisor_app
|
9cdab4aca19e193850943ef8308bad5c5ea0415d
|
[
"MIT"
] | null | null | null |
src/models/forecast/web_sarima.py
|
webclinic017/advisor_app
|
9cdab4aca19e193850943ef8308bad5c5ea0415d
|
[
"MIT"
] | null | null | null |
import warnings
from datetime import datetime, date
import pandas as pd
import numpy as np
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import itertools
import streamlit as st
import yfinance as yf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from yahooquery import Ticker
import src.tools.functions as f0
warnings.filterwarnings("ignore")
pd.plotting.register_matplotlib_converters()
plt.style.use("seaborn-poster")
sm, med, lg = "20", "25", "30"
plt.rcParams["font.size"] = sm # controls default text sizes
plt.rc("axes", titlesize=med) # fontsize of the axes title
plt.rc("axes", labelsize=med) # fontsize of the x & y labels
plt.rc("xtick", labelsize=sm) # fontsize of the tick labels
plt.rc("ytick", labelsize=sm) # fontsize of the tick labels
plt.rc("legend", fontsize=sm) # legend fontsize
plt.rc("figure", titlesize=lg) # fontsize of the figure title
plt.rc("axes", linewidth=2) # linewidth of plot lines
plt.rcParams["figure.figsize"] = [20, 10]
plt.rcParams["figure.dpi"] = 100
plt.rcParams["axes.facecolor"] = "silver"
class The_SARIMA_Model(object):
def __init__(self, stock):
self.sss = stock
self.company = f0.company_longName(self.sss)
def dataHull(self):
self.start = "2011-10-01"
self.end = "2021-10-19"
self.x_data = yf.download(self.sss, start=self.end)["Adj Close"]
self.x_data.columns = [self.company]
self.spData = yf.download(self.sss, period='max')
self.spData = pd.DataFrame(self.spData.loc[:self.end])
self.dataSP = pd.DataFrame(self.spData["Close"])
self.dataSP.columns = [self.sss]
self.dataSP.index = pd.to_datetime(self.dataSP.index)
self.df_settle = self.spData["Close"].resample("BM").ffill().dropna()
self.df_rolling = self.df_settle.rolling(12)
self.df_mean = self.df_rolling.mean()
self.df_std = self.df_rolling.std()
def adf(self):
self.dataHull()
self.result = adfuller(self.df_settle)
self.critical_values = self.result[4]
self.df_log = np.log(self.df_settle)
self.df_log_ma = self.df_log.rolling(2).mean()
self.df_detrend = self.df_log - self.df_log_ma
self.df_detrend.dropna(inplace=True)
# Mean and standard deviation of detrended data
self.df_detrend_rolling = self.df_detrend.rolling(12)
self.df_detrend_ma = self.df_detrend_rolling.mean()
self.df_detrend_std = self.df_detrend_rolling.std()
self.result2 = adfuller(self.df_detrend)
self.critical_values2 = self.result2[4]
self.df_log_diff = self.df_log.diff(periods=3).dropna()
# Mean and standard deviation of differenced data
self.df_diff_rolling = self.df_log_diff.rolling(12)
self.df_diff_ma = self.df_diff_rolling.mean()
self.df_diff_std = self.df_diff_rolling.std()
def seasonal_decomp(self):
self.adf()
self.decompose_result = seasonal_decompose(self.df_log.dropna(), period=12)
self.df_trend = self.decompose_result.trend
self.df_season = self.decompose_result.seasonal
self.df_residual = self.decompose_result.resid
self.df_log_diff = self.df_residual.diff().dropna()
# Mean and standard deviation of differenced data
self.df_diff_rolling = self.df_log_diff.rolling(12)
self.df_diff_ma = self.df_diff_rolling.mean()
self.df_diff_std = self.df_diff_rolling.std()
self.result = adfuller(self.df_residual.dropna())
self.critical_values = self.result[4]
def arima_grid_search(self, s=12):
self.seasonal_decomp()
self.s = s
self.p = self.d = self.q = range(2)
self.param_combinations = list(itertools.product(self.p, self.d, self.q))
self.lowest_aic, self.pdq, self.pdqs = None, None, None
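        # Exhaustive search over (p, d, q) x (P, D, Q, s=12) with each order in {0, 1};
        # every combination is fit below and the one with the lowest AIC is kept.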
self.total_iterations = 0
for order in self.param_combinations:
for (self.p, self.q, self.d) in self.param_combinations:
self.seasonal_order = (self.p, self.q, self.d, self.s)
self.total_iterations += 1
try:
self.model = SARIMAX(
self.df_settle,
order=order,
seasonal_order=self.seasonal_order,
enforce_stationarity=False,
enforce_invertibility=False,
disp=False,
)
self.model_result = self.model.fit(maxiter=200, disp=False)
if not self.lowest_aic or self.model_result.aic < self.lowest_aic:
self.lowest_aic = self.model_result.aic
self.pdq, self.pdqs = order, self.seasonal_order
except Exception:
continue
return self.lowest_aic, self.pdq, self.pdqs
def fitModel_to_SARIMAX(self):
self.arima_grid_search()
self.model = SARIMAX(
self.df_settle,
order=self.pdq,
seasonal_order=self.seasonal_order,
enforce_stationarity=True,
enforce_invertibility=True,
disp=False,
)
self.model_results = self.model.fit(maxiter=200, disp=False)
return self.model_results
def predict(self):
self.fitModel_to_SARIMAX()
self.n = len(self.df_settle.index)
self.prediction = self.model_results.get_prediction(start=self.n - 12 * 5, end=self.n + 12)
self.prediction_ci = self.prediction.conf_int()
self.prediction_ci.columns=['Lower_Confidence_Boundary', 'Upper_Confidence_Boundary']
fig, ax = plt.subplots()
ax = self.df_settle['2019':].plot(label='Live_Price', color='k')
self.prediction_ci['2019':].plot(
ax=ax,
style=['--', '--'],
color=['r','g'],
label='predicted/forecasted',
)
ci_index = self.prediction_ci.index
lower_ci = self.prediction_ci.iloc[:, 0]
upper_ci = self.prediction_ci.iloc[:, 1]
ax.fill_between(
ci_index,
lower_ci,
upper_ci,
color='c',
alpha=.01,
label='95% Confidence Interval'
)
ax.fill_between(
ci_index,
(self.prediction_ci.iloc[:, 0]),
(self.prediction_ci.iloc[:, 1]),
color='r',
where=ci_index<'2020 11/30',
alpha=.2,
label='Training'
)
ax.fill_between(
ci_index,
(self.prediction_ci.iloc[:, 0]),
(self.prediction_ci.iloc[:, 1]),
color='gold',
where=ci_index.isin(ci_index[43:60]),
alpha=.2,
label='Testing'
)
ax.fill_between(
ci_index,
(self.prediction_ci.iloc[:, 0]),
(self.prediction_ci.iloc[:, 1]),
color='darkgreen',
where=ci_index.isin(ci_index[59:]),
alpha=.2,
label='Forecast'
)
ax.set_xlabel('Time (years)')
ax.set_ylabel('Prices')
ax.axvline(x='2020 06/25', color = 'k')
ax.axvline(x='2021 10/25', color = 'k')
ax.set_facecolor('white')
plt.grid(True, which='major', axis='both', color='k', alpha=.34)
ax.legend()
plt.title('SARIMA FORECAST')
l = plt.legend(loc='best', shadow=True, fontsize='x-large')
for text in l.get_texts():
text.set_color("k")
text.set_fontweight(13)
text.set_fontsize(13)
        l.get_frame().set_facecolor('white')
st.pyplot(fig)
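# Usage sketch (assumes a valid ticker symbol and a running Streamlit context,
# since predict() renders the forecast with st.pyplot; "SPY" is illustrative):
#   model = The_SARIMA_Model("SPY")
#   model.predict()   # grid-searches SARIMA orders, fits SARIMAX, plots the forecast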
| 36.841121
| 99
| 0.601598
|
7953308dff57badc907c2979df0f52e41e1db2e0
| 36,007
|
py
|
Python
|
python/ray/actor.py
|
arcelien/ray
|
06c768823c781244bf0be183cc7876641df0c290
|
[
"Apache-2.0"
] | 2
|
2019-10-23T07:31:45.000Z
|
2019-10-23T07:31:47.000Z
|
python/ray/actor.py
|
arcelien/ray
|
06c768823c781244bf0be183cc7876641df0c290
|
[
"Apache-2.0"
] | null | null | null |
python/ray/actor.py
|
arcelien/ray
|
06c768823c781244bf0be183cc7876641df0c290
|
[
"Apache-2.0"
] | 1
|
2019-05-23T20:33:09.000Z
|
2019-05-23T20:33:09.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import hashlib
import inspect
import logging
import six
import sys
import threading
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from ray.function_manager import FunctionDescriptor
import ray.ray_constants as ray_constants
import ray.signature as signature
import ray.worker
from ray.utils import _random_string
from ray import (ObjectID, ActorID, ActorHandleID, ActorClassID, TaskID,
DriverID)
logger = logging.getLogger(__name__)
def compute_actor_handle_id(actor_handle_id, num_forks):
"""Deterministically compute an actor handle ID.
A new actor handle ID is generated when it is forked from another actor
handle. The new handle ID is computed as hash(old_handle_id || num_forks).
Args:
actor_handle_id (common.ObjectID): The original actor handle ID.
num_forks: The number of times the original actor handle has been
forked so far.
Returns:
An ID for the new actor handle.
"""
assert isinstance(actor_handle_id, ActorHandleID)
handle_id_hash = hashlib.sha1()
handle_id_hash.update(actor_handle_id.binary())
handle_id_hash.update(str(num_forks).encode("ascii"))
handle_id = handle_id_hash.digest()
return ActorHandleID(handle_id)
def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id):
"""Deterministically compute an actor handle ID in the non-forked case.
This code path is used whenever an actor handle is pickled and unpickled
(for example, if a remote function closes over an actor handle). Then,
whenever the actor handle is used, a new actor handle ID will be generated
on the fly as a deterministic function of the actor ID, the previous actor
handle ID and the current task ID.
TODO(rkn): It may be possible to cause problems by closing over multiple
actor handles in a remote function, which then get unpickled and give rise
to the same actor handle IDs.
Args:
actor_handle_id: The original actor handle ID.
current_task_id: The ID of the task that is unpickling the handle.
Returns:
An ID for the new actor handle.
"""
assert isinstance(actor_handle_id, ActorHandleID)
assert isinstance(current_task_id, TaskID)
handle_id_hash = hashlib.sha1()
handle_id_hash.update(actor_handle_id.binary())
handle_id_hash.update(current_task_id.binary())
handle_id = handle_id_hash.digest()
return ActorHandleID(handle_id)
def method(*args, **kwargs):
"""Annotate an actor method.
.. code-block:: python
@ray.remote
class Foo(object):
@ray.method(num_return_vals=2)
def bar(self):
return 1, 2
f = Foo.remote()
_, _ = f.bar.remote()
Args:
num_return_vals: The number of object IDs that should be returned by
invocations of this actor method.
"""
assert len(args) == 0
assert len(kwargs) == 1
assert "num_return_vals" in kwargs
num_return_vals = kwargs["num_return_vals"]
def annotate_method(method):
method.__ray_num_return_vals__ = num_return_vals
return method
return annotate_method
# Create objects to wrap method invocations. This is done so that we can
# invoke methods with actor.method.remote() instead of actor.method().
class ActorMethod(object):
def __init__(self, actor, method_name, num_return_vals):
self._actor = actor
self._method_name = method_name
self._num_return_vals = num_return_vals
def __call__(self, *args, **kwargs):
raise Exception("Actor methods cannot be called directly. Instead "
"of running 'object.{}()', try "
"'object.{}.remote()'.".format(self._method_name,
self._method_name))
def remote(self, *args, **kwargs):
return self._remote(args, kwargs)
def _remote(self, args=None, kwargs=None, num_return_vals=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
if num_return_vals is None:
num_return_vals = self._num_return_vals
return self._actor._actor_method_call(
self._method_name,
args=args,
kwargs=kwargs,
num_return_vals=num_return_vals)
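# ActorMethod objects are created on the fly by ActorHandle.__getattribute__ below,
# so `handle.method_name.remote(*args)` is routed through
# ActorHandle._actor_method_call rather than executing the method locally.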
class ActorClass(object):
"""An actor class.
This is a decorated class. It can be used to create actors.
Attributes:
_modified_class: The original class that was decorated (with some
additional methods added like __ray_terminate__).
_class_id: The ID of this actor class.
_class_name: The name of this class.
_num_cpus: The default number of CPUs required by the actor creation
task.
_num_gpus: The default number of GPUs required by the actor creation
task.
_resources: The default resources required by the actor creation task.
_actor_method_cpus: The number of CPUs required by actor method tasks.
_exported: True if the actor class has been exported and false
otherwise.
_actor_methods: The actor methods.
_method_signatures: The signatures of the methods.
_actor_method_names: The names of the actor methods.
_actor_method_num_return_vals: The default number of return values for
each actor method.
"""
def __init__(self, modified_class, class_id, max_reconstructions, num_cpus,
num_gpus, resources):
self._modified_class = modified_class
self._class_id = class_id
self._class_name = modified_class.__name__
self._max_reconstructions = max_reconstructions
self._num_cpus = num_cpus
self._num_gpus = num_gpus
self._resources = resources
self._exported = False
self._actor_methods = inspect.getmembers(
self._modified_class, ray.utils.is_function_or_method)
self._actor_method_names = [
method_name for method_name, _ in self._actor_methods
]
constructor_name = "__init__"
if constructor_name not in self._actor_method_names:
# Add __init__ if it does not exist.
# Actor creation will be executed with __init__ together.
# Assign an __init__ function will avoid many checks later on.
def __init__(self):
pass
self._modified_class.__init__ = __init__
self._actor_method_names.append(constructor_name)
self._actor_methods.append((constructor_name, __init__))
# Extract the signatures of each of the methods. This will be used
# to catch some errors if the methods are called with inappropriate
# arguments.
self._method_signatures = {}
self._actor_method_num_return_vals = {}
for method_name, method in self._actor_methods:
# Print a warning message if the method signature is not
# supported. We don't raise an exception because if the actor
# inherits from a class that has a method whose signature we
# don't support, there may not be much the user can do about it.
signature.check_signature_supported(method, warn=True)
self._method_signatures[method_name] = signature.extract_signature(
method, ignore_first=not ray.utils.is_class_method(method))
# Set the default number of return values for this method.
if hasattr(method, "__ray_num_return_vals__"):
self._actor_method_num_return_vals[method_name] = (
method.__ray_num_return_vals__)
else:
self._actor_method_num_return_vals[method_name] = (
ray_constants.DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS)
def __call__(self, *args, **kwargs):
raise Exception("Actors methods cannot be instantiated directly. "
"Instead of running '{}()', try '{}.remote()'.".format(
self._class_name, self._class_name))
def remote(self, *args, **kwargs):
"""Create an actor.
Args:
args: These arguments are forwarded directly to the actor
constructor.
kwargs: These arguments are forwarded directly to the actor
constructor.
Returns:
A handle to the newly created actor.
"""
return self._remote(args=args, kwargs=kwargs)
def _remote(self,
args=None,
kwargs=None,
num_cpus=None,
num_gpus=None,
resources=None):
"""Create an actor.
This method allows more flexibility than the remote method because
resource requirements can be specified and override the defaults in the
decorator.
Args:
args: The arguments to forward to the actor constructor.
kwargs: The keyword arguments to forward to the actor constructor.
num_cpus: The number of CPUs required by the actor creation task.
num_gpus: The number of GPUs required by the actor creation task.
resources: The custom resources required by the actor creation
task.
Returns:
A handle to the newly created actor.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
worker = ray.worker.get_global_worker()
if worker.mode is None:
raise Exception("Actors cannot be created before ray.init() "
"has been called.")
actor_id = ActorID(_random_string())
# The actor cursor is a dummy object representing the most recent
# actor method invocation. For each subsequent method invocation,
# the current cursor should be added as a dependency, and then
# updated to reflect the new invocation.
actor_cursor = None
# Set the actor's default resources if not already set. First three
# conditions are to check that no resources were specified in the
# decorator. Last three conditions are to check that no resources were
# specified when _remote() was called.
if (self._num_cpus is None and self._num_gpus is None
and self._resources is None and num_cpus is None
and num_gpus is None and resources is None):
# In the default case, actors acquire no resources for
# their lifetime, and actor methods will require 1 CPU.
cpus_to_use = ray_constants.DEFAULT_ACTOR_CREATION_CPU_SIMPLE
actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SIMPLE
else:
# If any resources are specified (here or in decorator), then
# all resources are acquired for the actor's lifetime and no
# resources are associated with methods.
cpus_to_use = (ray_constants.DEFAULT_ACTOR_CREATION_CPU_SPECIFIED
if self._num_cpus is None else self._num_cpus)
actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SPECIFIED
# Do not export the actor class or the actor if run in LOCAL_MODE
# Instead, instantiate the actor locally and add it to the worker's
# dictionary
if worker.mode == ray.LOCAL_MODE:
worker.actors[actor_id] = self._modified_class(
*copy.deepcopy(args), **copy.deepcopy(kwargs))
else:
# Export the actor.
if not self._exported:
worker.function_actor_manager.export_actor_class(
self._modified_class, self._actor_method_names)
self._exported = True
resources = ray.utils.resources_from_resource_arguments(
cpus_to_use, self._num_gpus, self._resources, num_cpus,
num_gpus, resources)
# If the actor methods require CPU resources, then set the required
# placement resources. If actor_placement_resources is empty, then
# the required placement resources will be the same as resources.
actor_placement_resources = {}
assert actor_method_cpu in [0, 1]
if actor_method_cpu == 1:
actor_placement_resources = resources.copy()
actor_placement_resources["CPU"] += 1
function_name = "__init__"
function_signature = self._method_signatures[function_name]
creation_args = signature.extend_args(function_signature, args,
kwargs)
function_descriptor = FunctionDescriptor(
self._modified_class.__module__, function_name,
self._modified_class.__name__)
[actor_cursor] = worker.submit_task(
function_descriptor,
creation_args,
actor_creation_id=actor_id,
max_actor_reconstructions=self._max_reconstructions,
num_return_vals=1,
resources=resources,
placement_resources=actor_placement_resources)
assert isinstance(actor_cursor, ObjectID)
actor_handle = ActorHandle(
actor_id, self._modified_class.__module__, self._class_name,
actor_cursor, self._actor_method_names, self._method_signatures,
self._actor_method_num_return_vals, actor_cursor, actor_method_cpu,
worker.task_driver_id)
# We increment the actor counter by 1 to account for the actor creation
# task.
actor_handle._ray_actor_counter += 1
return actor_handle
@property
def class_id(self):
return self._class_id
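# Hedged end-to-end sketch of how ActorClass and ActorHandle fit together (the
# `ray.remote` decorator that produces the ActorClass lives elsewhere in Ray, and
# the class and method names here are illustrative):
#
#   @ray.remote
#   class Counter(object):
#       def __init__(self):
#           self.value = 0
#       def increment(self):
#           self.value += 1
#           return self.value
#
#   counter = Counter.remote()            # ActorClass._remote -> ActorHandle
#   value_id = counter.increment.remote() # ActorMethod._remote -> submit_task
#   ray.get(value_id)                     # -> 1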
class ActorHandle(object):
"""A handle to an actor.
The fields in this class are prefixed with _ray_ to hide them from the user
and to avoid collision with actor method names.
An ActorHandle can be created in three ways. First, by calling .remote() on
an ActorClass. Second, by passing an actor handle into a task (forking the
ActorHandle). Third, by directly serializing the ActorHandle (e.g., with
cloudpickle).
Attributes:
_ray_actor_id: The ID of the corresponding actor.
_ray_module_name: The module name of this actor.
_ray_actor_handle_id: The ID of this handle. If this is the "original"
handle for an actor (as opposed to one created by passing another
handle into a task), then this ID must be NIL_ID. If this
ActorHandle was created by forking an existing ActorHandle, then
this ID must be computed deterministically via
compute_actor_handle_id. If this ActorHandle was created by an
out-of-band mechanism (e.g., pickling), then this must be None (in
this case, a new actor handle ID will be generated on the fly every
time a method is invoked).
_ray_actor_cursor: The actor cursor is a dummy object representing the
most recent actor method invocation. For each subsequent method
invocation, the current cursor should be added as a dependency, and
then updated to reflect the new invocation.
_ray_actor_counter: The number of actor method invocations that we've
called so far.
_ray_actor_method_names: The names of the actor methods.
_ray_method_signatures: The signatures of the actor methods.
_ray_method_num_return_vals: The default number of return values for
each method.
_ray_class_name: The name of the actor class.
_ray_actor_forks: The number of times this handle has been forked.
_ray_actor_creation_dummy_object_id: The dummy object ID from the actor
creation task.
_ray_actor_method_cpus: The number of CPUs required by actor methods.
_ray_original_handle: True if this is the original actor handle for a
given actor. If this is true, then the actor will be destroyed when
this handle goes out of scope.
_ray_actor_driver_id: The driver ID of the job that created the actor
(it is possible that this ActorHandle exists on a driver with a
different driver ID).
_ray_new_actor_handles: The new actor handles that were created from
this handle since the last task on this handle was submitted. This
is used to garbage-collect dummy objects that are no longer
necessary in the backend.
"""
def __init__(self,
actor_id,
module_name,
class_name,
actor_cursor,
actor_method_names,
method_signatures,
method_num_return_vals,
actor_creation_dummy_object_id,
actor_method_cpus,
actor_driver_id,
actor_handle_id=None):
assert isinstance(actor_id, ActorID)
assert isinstance(actor_driver_id, DriverID)
self._ray_actor_id = actor_id
self._ray_module_name = module_name
# False if this actor handle was created by forking or pickling. True
# if it was created by the _serialization_helper function.
self._ray_original_handle = actor_handle_id is None
if self._ray_original_handle:
self._ray_actor_handle_id = ActorHandleID.nil()
else:
assert isinstance(actor_handle_id, ActorHandleID)
self._ray_actor_handle_id = actor_handle_id
self._ray_actor_cursor = actor_cursor
self._ray_actor_counter = 0
self._ray_actor_method_names = actor_method_names
self._ray_method_signatures = method_signatures
self._ray_method_num_return_vals = method_num_return_vals
self._ray_class_name = class_name
self._ray_actor_forks = 0
self._ray_actor_creation_dummy_object_id = (
actor_creation_dummy_object_id)
self._ray_actor_method_cpus = actor_method_cpus
self._ray_actor_driver_id = actor_driver_id
self._ray_new_actor_handles = []
self._ray_actor_lock = threading.Lock()
def _actor_method_call(self,
method_name,
args=None,
kwargs=None,
num_return_vals=None):
"""Method execution stub for an actor handle.
This is the function that executes when
`actor.method_name.remote(*args, **kwargs)` is called. Instead of
executing locally, the method is packaged as a task and scheduled
to the remote actor instance.
Args:
method_name: The name of the actor method to execute.
args: A list of arguments for the actor method.
kwargs: A dictionary of keyword arguments for the actor method.
num_return_vals (int): The number of return values for the method.
Returns:
object_ids: A list of object IDs returned by the remote actor
method.
"""
worker = ray.worker.get_global_worker()
worker.check_connected()
function_signature = self._ray_method_signatures[method_name]
if args is None:
args = []
if kwargs is None:
kwargs = {}
args = signature.extend_args(function_signature, args, kwargs)
# Execute functions locally if Ray is run in LOCAL_MODE
# Copy args to prevent the function from mutating them.
if worker.mode == ray.LOCAL_MODE:
return getattr(worker.actors[self._ray_actor_id],
method_name)(*copy.deepcopy(args))
function_descriptor = FunctionDescriptor(
self._ray_module_name, method_name, self._ray_class_name)
with self._ray_actor_lock:
object_ids = worker.submit_task(
function_descriptor,
args,
actor_id=self._ray_actor_id,
actor_handle_id=self._ray_actor_handle_id,
actor_counter=self._ray_actor_counter,
actor_creation_dummy_object_id=(
self._ray_actor_creation_dummy_object_id),
execution_dependencies=[self._ray_actor_cursor],
new_actor_handles=self._ray_new_actor_handles,
# We add one for the dummy return ID.
num_return_vals=num_return_vals + 1,
resources={"CPU": self._ray_actor_method_cpus},
placement_resources={},
driver_id=self._ray_actor_driver_id,
)
# Update the actor counter and cursor to reflect the most recent
# invocation.
self._ray_actor_counter += 1
# The last object returned is the dummy object that should be
# passed in to the next actor method. Do not return it to the user.
self._ray_actor_cursor = object_ids.pop()
# We have notified the backend of the new actor handles to expect
# since the last task was submitted, so clear the list.
self._ray_new_actor_handles = []
if len(object_ids) == 1:
object_ids = object_ids[0]
elif len(object_ids) == 0:
object_ids = None
return object_ids
# Make tab completion work.
def __dir__(self):
return self._ray_actor_method_names
def __getattribute__(self, attr):
try:
# Check whether this is an actor method.
actor_method_names = object.__getattribute__(
self, "_ray_actor_method_names")
if attr in actor_method_names:
# We create the ActorMethod on the fly here so that the
# ActorHandle doesn't need a reference to the ActorMethod.
# The ActorMethod has a reference to the ActorHandle and
                # this was causing cyclic references which were preventing
                # object deallocation from behaving in a predictable
                # manner.
return ActorMethod(self, attr,
self._ray_method_num_return_vals[attr])
except AttributeError:
pass
# If the requested attribute is not a registered method, fall back
# to default __getattribute__.
return object.__getattribute__(self, attr)
def __repr__(self):
return "Actor({}, {})".format(self._ray_class_name,
self._ray_actor_id.hex())
def __del__(self):
"""Kill the worker that is running this actor."""
# TODO(swang): Also clean up forked actor handles.
# Kill the worker if this is the original actor handle, created
# with Class.remote(). TODO(rkn): Even without passing handles around,
        # this is not the right policy. The actor should be alive as long as
# there are ANY handles in scope in the process that created the actor,
# not just the first one.
worker = ray.worker.get_global_worker()
if (worker.mode == ray.worker.SCRIPT_MODE
and self._ray_actor_driver_id.binary() != worker.worker_id):
            # If the worker is a driver and the driver id has changed
            # because Ray was shut down and re-initialized, the actor is
            # already cleaned up and we don't need to send
            # `__ray_terminate__` again.
logger.warning(
"Actor is garbage collected in the wrong driver." +
" Actor id = %s, class name = %s.", self._ray_actor_id,
self._ray_class_name)
return
if worker.connected and self._ray_original_handle:
# TODO(rkn): Should we be passing in the actor cursor as a
# dependency here?
self.__ray_terminate__.remote()
@property
def _actor_id(self):
return self._ray_actor_id
@property
def _actor_handle_id(self):
return self._ray_actor_handle_id
def _serialization_helper(self, ray_forking):
"""This is defined in order to make pickling work.
Args:
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
Returns:
A dictionary of the information needed to reconstruct the object.
"""
if ray_forking:
actor_handle_id = compute_actor_handle_id(
self._ray_actor_handle_id, self._ray_actor_forks)
else:
actor_handle_id = self._ray_actor_handle_id
# Note: _ray_actor_cursor and _ray_actor_creation_dummy_object_id
# could be None.
state = {
"actor_id": self._ray_actor_id,
"actor_handle_id": actor_handle_id,
"module_name": self._ray_module_name,
"class_name": self._ray_class_name,
"actor_cursor": self._ray_actor_cursor,
"actor_method_names": self._ray_actor_method_names,
"method_signatures": self._ray_method_signatures,
"method_num_return_vals": self._ray_method_num_return_vals,
# Actors in local mode don't have dummy objects.
"actor_creation_dummy_object_id": self.
_ray_actor_creation_dummy_object_id,
"actor_method_cpus": self._ray_actor_method_cpus,
"actor_driver_id": self._ray_actor_driver_id,
"ray_forking": ray_forking
}
if ray_forking:
self._ray_actor_forks += 1
new_actor_handle_id = actor_handle_id
else:
# The execution dependency for a pickled actor handle is never safe
# to release, since it could be unpickled and submit another
# dependent task at any time. Therefore, we notify the backend of a
# random handle ID that will never actually be used.
new_actor_handle_id = ActorHandleID(_random_string())
# Notify the backend to expect this new actor handle. The backend will
# not release the cursor for any new handles until the first task for
# each of the new handles is submitted.
# NOTE(swang): There is currently no garbage collection for actor
# handles until the actor itself is removed.
self._ray_new_actor_handles.append(new_actor_handle_id)
return state
def _deserialization_helper(self, state, ray_forking):
"""This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
"""
worker = ray.worker.get_global_worker()
worker.check_connected()
if state["ray_forking"]:
actor_handle_id = state["actor_handle_id"]
else:
# Right now, if the actor handle has been pickled, we create a
# temporary actor handle id for invocations.
# TODO(pcm): This still leads to a lot of actor handles being
# created, there should be a better way to handle pickled
# actor handles.
# TODO(swang): Accessing the worker's current task ID is not
# thread-safe.
# TODO(swang): Unpickling the same actor handle twice in the same
# task will break the application, and unpickling it twice in the
# same actor is likely a performance bug. We should consider
# logging a warning in these cases.
actor_handle_id = compute_actor_handle_id_non_forked(
state["actor_handle_id"], worker.current_task_id)
self.__init__(
state["actor_id"],
state["module_name"],
state["class_name"],
state["actor_cursor"],
state["actor_method_names"],
state["method_signatures"],
state["method_num_return_vals"],
state["actor_creation_dummy_object_id"],
state["actor_method_cpus"],
# This is the driver ID of the driver that owns the actor, not
# necessarily the driver that owns this actor handle.
state["actor_driver_id"],
actor_handle_id=actor_handle_id)
def __getstate__(self):
"""This code path is used by pickling but not by Ray forking."""
return self._serialization_helper(False)
def __setstate__(self, state):
"""This code path is used by pickling but not by Ray forking."""
return self._deserialization_helper(state, False)
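# Hedged usage sketch (added for illustration; not part of the original
# module and never invoked here): the ActorHandle docstring above describes
# three ways a handle can be created. The hypothetical Counter actor below
# demonstrates them, assuming an already-initialized Ray session.
def _example_actor_handle_usage():  # pragma: no cover - illustrative only
    import cloudpickle
    import ray

    @ray.remote
    class Counter(object):
        def __init__(self):
            self.value = 0

        def increment(self):
            self.value += 1
            return self.value

    # 1. Original handle: returned by calling .remote() on the ActorClass.
    counter = Counter.remote()

    # 2. Forked handle: created implicitly when the handle is passed into
    #    a task; the receiving task sees a forked ActorHandle.
    @ray.remote
    def bump(handle):
        return ray.get(handle.increment.remote())

    # 3. Out-of-band handle: created by serializing the handle directly
    #    (the class docstring names cloudpickle as one such mechanism).
    restored = cloudpickle.loads(cloudpickle.dumps(counter))

    return ray.get([bump.remote(counter), restored.increment.remote()])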
def make_actor(cls, num_cpus, num_gpus, resources, max_reconstructions):
# Give an error if cls is an old-style class.
if not issubclass(cls, object):
raise TypeError(
"The @ray.remote decorator cannot be applied to old-style "
"classes. In Python 2, you must declare the class with "
"'class ClassName(object):' instead of 'class ClassName:'.")
if issubclass(cls, Checkpointable) and inspect.isabstract(cls):
raise TypeError(
"A checkpointable actor class should implement all abstract "
"methods in the `Checkpointable` interface.")
if max_reconstructions is None:
max_reconstructions = 0
if not (ray_constants.NO_RECONSTRUCTION <= max_reconstructions <=
ray_constants.INFINITE_RECONSTRUCTION):
raise Exception("max_reconstructions must be in range [%d, %d]." %
(ray_constants.NO_RECONSTRUCTION,
ray_constants.INFINITE_RECONSTRUCTION))
# Modify the class to have an additional method that will be used for
# terminating the worker.
class Class(cls):
def __ray_terminate__(self):
worker = ray.worker.get_global_worker()
if worker.mode != ray.LOCAL_MODE:
# Disconnect the worker from the raylet. The point of
# this is so that when the worker kills itself below, the
# raylet won't push an error message to the driver.
worker.raylet_client.disconnect()
sys.exit(0)
assert False, "This process should have terminated."
def __ray_checkpoint__(self):
"""Save a checkpoint.
This task saves the current state of the actor, the current task
frontier according to the raylet, and the checkpoint index
(number of tasks executed so far).
"""
worker = ray.worker.global_worker
if not isinstance(self, ray.actor.Checkpointable):
raise Exception(
"__ray_checkpoint__.remote() may only be called on actors "
"that implement ray.actor.Checkpointable")
return worker._save_actor_checkpoint()
Class.__module__ = cls.__module__
Class.__name__ = cls.__name__
class_id = ActorClassID(_random_string())
return ActorClass(Class, class_id, max_reconstructions, num_cpus, num_gpus,
resources)
ray.worker.global_worker.make_actor = make_actor
CheckpointContext = namedtuple(
"CheckpointContext",
[
# Actor's ID.
"actor_id",
# Number of tasks executed since last checkpoint.
"num_tasks_since_last_checkpoint",
# Time elapsed since last checkpoint, in milliseconds.
"time_elapsed_ms_since_last_checkpoint",
],
)
"""A namedtuple that contains information about actor's last checkpoint."""
Checkpoint = namedtuple(
"Checkpoint",
[
# ID of this checkpoint.
"checkpoint_id",
# The timestamp at which this checkpoint was saved,
# represented as milliseconds elapsed since Unix epoch.
"timestamp",
],
)
"""A namedtuple that represents a checkpoint."""
class Checkpointable(six.with_metaclass(ABCMeta, object)):
"""An interface that indicates an actor can be checkpointed."""
@abstractmethod
def should_checkpoint(self, checkpoint_context):
"""Whether this actor needs to be checkpointed.
This method will be called after every task. You should implement this
callback to decide whether this actor needs to be checkpointed at this
time, based on the checkpoint context, or any other factors.
Args:
checkpoint_context: A namedtuple that contains info about last
checkpoint.
Returns:
A boolean value that indicates whether this actor needs to be
checkpointed.
"""
pass
@abstractmethod
def save_checkpoint(self, actor_id, checkpoint_id):
"""Save a checkpoint to persistent storage.
If `should_checkpoint` returns true, this method will be called. You
should implement this callback to save actor's checkpoint and the given
checkpoint id to persistent storage.
Args:
actor_id: Actor's ID.
checkpoint_id: ID of this checkpoint. You should save it together
with actor's checkpoint data. And it will be used by the
`load_checkpoint` method.
Returns:
None.
"""
pass
@abstractmethod
def load_checkpoint(self, actor_id, available_checkpoints):
"""Load actor's previous checkpoint, and restore actor's state.
This method will be called when an actor is reconstructed, after
actor's constructor.
If the actor needs to restore from previous checkpoint, this function
should restore actor's state and return the checkpoint ID. Otherwise,
it should do nothing and return None.
Note, this method must return one of the checkpoint IDs in the
`available_checkpoints` list, or None. Otherwise, an exception will be
raised.
Args:
actor_id: Actor's ID.
available_checkpoints: A list of `Checkpoint` namedtuples that
contains all available checkpoint IDs and their timestamps,
sorted by timestamp in descending order.
Returns:
The ID of the checkpoint from which the actor was resumed, or None
if the actor should restart from the beginning.
"""
pass
@abstractmethod
def checkpoint_expired(self, actor_id, checkpoint_id):
"""Delete an expired checkpoint.
        This method will be called when a checkpoint has expired. You should
implement this method to delete your application checkpoint data.
Note, the maximum number of checkpoints kept in the backend can be
configured at `RayConfig.num_actor_checkpoints_to_keep`.
Args:
actor_id: ID of the actor.
checkpoint_id: ID of the checkpoint that has expired.
Returns:
None.
"""
pass
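# Hedged example (added for illustration; never instantiated here): a minimal
# Checkpointable implementation that checkpoints every 10 tasks. The in-memory
# `_store` dict stands in for real persistent storage and is hypothetical; in
# real use the class would also be declared with @ray.remote.
class _ExampleCheckpointableCounter(Checkpointable):
    _store = {}  # pretend persistent storage, keyed by checkpoint_id

    def __init__(self):
        self.value = 0

    def increment(self):
        self.value += 1
        return self.value

    def should_checkpoint(self, checkpoint_context):
        # Checkpoint after every 10 tasks (see CheckpointContext above).
        return checkpoint_context.num_tasks_since_last_checkpoint >= 10

    def save_checkpoint(self, actor_id, checkpoint_id):
        self._store[checkpoint_id] = self.value

    def load_checkpoint(self, actor_id, available_checkpoints):
        # Restore from the most recent checkpoint we still have data for.
        for checkpoint in available_checkpoints:
            if checkpoint.checkpoint_id in self._store:
                self.value = self._store[checkpoint.checkpoint_id]
                return checkpoint.checkpoint_id
        return None

    def checkpoint_expired(self, actor_id, checkpoint_id):
        self._store.pop(checkpoint_id, None)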
def get_checkpoints_for_actor(actor_id):
"""Get the available checkpoints for the given actor ID, return a list
sorted by checkpoint timestamp in descending order.
"""
checkpoint_info = ray.worker.global_state.actor_checkpoint_info(actor_id)
if checkpoint_info is None:
return []
checkpoints = [
Checkpoint(checkpoint_id, timestamp) for checkpoint_id, timestamp in
zip(checkpoint_info["CheckpointIds"], checkpoint_info["Timestamps"])
]
return sorted(
checkpoints,
key=lambda checkpoint: checkpoint.timestamp,
reverse=True,
)
| 41.578522
| 79
| 0.642097
|
7953328ea49bd41aab9343a58a6743607ded8af7
| 4,823
|
py
|
Python
|
src/aprl/training/embedded_agents.py
|
fkamrani/adversarial-policies
|
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
|
[
"MIT"
] | 211
|
2019-02-22T08:07:25.000Z
|
2022-03-14T10:44:20.000Z
|
src/aprl/training/embedded_agents.py
|
fkamrani/adversarial-policies
|
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
|
[
"MIT"
] | 51
|
2019-02-08T01:39:49.000Z
|
2022-02-15T21:21:46.000Z
|
src/aprl/training/embedded_agents.py
|
fkamrani/adversarial-policies
|
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
|
[
"MIT"
] | 41
|
2019-04-23T05:01:49.000Z
|
2022-03-16T06:51:19.000Z
|
"""Wrappers to embed a fixed agent in an environment."""
from aprl.envs.multi_agent import VecMultiWrapper, _tuple_pop, _tuple_space_filter
class CurryVecEnv(VecMultiWrapper):
"""Substitutes in a fixed agent for one of the players in a VecMultiEnv.
The agent's session will be closed, if it exists, when the environment is closed."""
def __init__(self, venv, policy, agent_idx=0, deterministic=False):
"""Fixes one of the players in a VecMultiEnv.
:param venv(VecMultiEnv): the environments.
:param policy(Policy): the policy to use for the agent at agent_idx.
:param agent_idx(int): the index of the agent that should be fixed.
:return: a new VecMultiEnv with num_agents decremented. It behaves like env but
with all actions at index agent_idx set to those returned by agent."""
super().__init__(venv)
assert venv.num_agents >= 1 # allow currying the last agent
self.num_agents = venv.num_agents - 1
self.observation_space = _tuple_space_filter(self.observation_space, agent_idx)
self.action_space = _tuple_space_filter(self.action_space, agent_idx)
self._agent_to_fix = agent_idx
self._policy = policy
self._state = None
self._obs = None
self._dones = [False] * venv.num_envs
self.deterministic = deterministic
def step_async(self, actions):
action, self._state = self._policy.predict(
self._obs, state=self._state, mask=self._dones, deterministic=self.deterministic
)
actions.insert(self._agent_to_fix, action)
self.venv.step_async(actions)
def step_wait(self):
observations, rewards, self._dones, infos = self.venv.step_wait()
observations, self._obs = _tuple_pop(observations, self._agent_to_fix)
rewards, _ = _tuple_pop(rewards, self._agent_to_fix)
return observations, rewards, self._dones, infos
def reset(self):
observations = self.venv.reset()
observations, self._obs = _tuple_pop(observations, self._agent_to_fix)
return observations
def get_policy(self):
return self._policy
def get_curry_venv(self):
"""Helper method to locate self in a stack of nested VecEnvWrappers"""
return self
def set_curry_obs(self, obs, env_idx=None):
"""Setter for observation of embedded agent
:param obs ([float]) a vectorized observation from either one or all environments
:param env_idx (int,None) indices of observations to set. None means all.
"""
if env_idx is None:
self._obs = obs
else:
self._obs[env_idx] = obs
def get_curry_obs(self, env_idx=None):
"""Getter for observation of embedded agent
:param env_idx (int,None) indices of observations to get. None means all.
:return: ([float]) observations from specified environments
"""
if env_idx is None:
return self._obs
else:
return self._obs[env_idx]
def close(self):
if hasattr(self._policy, "sess") and self._policy.sess is not None:
self._policy.sess.close()
super().close()
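# Hedged usage sketch (added for illustration; never called here): steps a
# CurryVecEnv for a few iterations. The `venv`, `policy` and `actions_fn`
# arguments are assumed to be provided by the surrounding codebase;
# `actions_fn` is a hypothetical callable mapping the remaining agents'
# observations to a list of per-agent actions.
def _example_curry_rollout(venv, policy, actions_fn, n_steps=10):
    curried = CurryVecEnv(venv, policy, agent_idx=0, deterministic=True)
    observations = curried.reset()
    for _ in range(n_steps):
        # step_async() inserts the embedded agent's action at agent_idx,
        # so `actions_fn` only supplies actions for the remaining agents.
        curried.step_async(actions_fn(observations))
        observations, rewards, dones, infos = curried.step_wait()
    curried.close()
    return observations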
class TransparentCurryVecEnv(CurryVecEnv):
"""CurryVecEnv that provides transparency data about its policy by updating infos dicts."""
def __init__(self, venv, policy, agent_idx=0, deterministic=False):
"""
:param venv (VecMultiEnv): the environments
:param policy (BaseRLModel): model which wraps a BasePolicy object
:param agent_idx (int): the index of the agent that should be fixed.
:return: a new VecMultiEnv with num_agents decremented. It behaves like env but
with all actions at index agent_idx set to those returned by agent."""
super().__init__(venv, policy, agent_idx, deterministic)
if not hasattr(self._policy.policy, "step_transparent"):
raise TypeError("Error: policy must be transparent")
self._action = None
def step_async(self, actions):
policy_out = self._policy.predict_transparent(
self._obs, state=self._state, mask=self._dones, deterministic=self.deterministic
)
self._action, self._state, self._data = policy_out
actions.insert(self._agent_to_fix, self._action)
self.venv.step_async(actions)
def step_wait(self):
observations, rewards, self._dones, infos = self.venv.step_wait()
observations, self._obs = _tuple_pop(observations, self._agent_to_fix)
for env_idx in range(self.num_envs):
env_data = {k: v[env_idx] for k, v in self._data.items()}
infos[env_idx][self._agent_to_fix].update(env_data)
return observations, rewards, self._dones, infos
| 41.93913
| 95
| 0.6695
|
795333cb2e4f4be8e83ada37895191f5d9f85f3c
| 3,595
|
py
|
Python
|
github/tests/PullRequestFile.py
|
dkavanagh/github-skill
|
6c38e6d16b367cb86f758e7cdac4131377ce31ce
|
[
"MIT"
] | null | null | null |
github/tests/PullRequestFile.py
|
dkavanagh/github-skill
|
6c38e6d16b367cb86f758e7cdac4131377ce31ce
|
[
"MIT"
] | null | null | null |
github/tests/PullRequestFile.py
|
dkavanagh/github-skill
|
6c38e6d16b367cb86f758e7cdac4131377ce31ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class PullRequestFile(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.file = self.g.get_user().get_repo("PyGithub").get_pull(31).get_files()[0]
def testAttributes(self):
self.assertEqual(self.file.additions, 1)
self.assertEqual(self.file.blob_url, "https://github.com/jacquev6/PyGithub/blob/8a4f306d4b223682dd19410d4a9150636ebe4206/codegen/templates/GithubObject.py")
self.assertEqual(self.file.changes, 2)
self.assertEqual(self.file.deletions, 1)
self.assertEqual(self.file.filename, "codegen/templates/GithubObject.py")
self.assertEqual(self.file.patch, '@@ -70,7 +70,7 @@ def __useAttributes( self, attributes ):\n \n # @toto No need to check if attribute is in attributes when attribute is mandatory\n {% for attribute in class.attributes|dictsort:"name" %}\n- if "{{ attribute.name }}" in attributes and attributes[ "{{ attribute.name }}" ] is not None:\n+ if "{{ attribute.name }}" in attributes and attributes[ "{{ attribute.name }}" ] is not None: # pragma no branch\n \n {% if attribute.type.cardinality == "scalar" %}\n {% if attribute.type.simple %}')
self.assertEqual(self.file.raw_url, "https://github.com/jacquev6/PyGithub/raw/8a4f306d4b223682dd19410d4a9150636ebe4206/codegen/templates/GithubObject.py")
self.assertEqual(self.file.sha, "8a4f306d4b223682dd19410d4a9150636ebe4206")
self.assertEqual(self.file.status, "modified")
# test __repr__() based on this attributes
self.assertEqual(self.file.__repr__(), 'File(sha="8a4f306d4b223682dd19410d4a9150636ebe4206", filename="codegen/templates/GithubObject.py")')
| 74.895833
| 570
| 0.539638
|
7953341be29433e88cb161bda9eb159ca012f198
| 3,279
|
py
|
Python
|
src/aiy/voicehat.py
|
zmsp/AIY-data-center-utility
|
c88b50e6ca952b1ba9deea5ea85ff38fd5ff42c1
|
[
"Apache-2.0"
] | 2
|
2017-10-30T14:46:10.000Z
|
2019-04-05T07:56:32.000Z
|
src/aiy/voicehat.py
|
zmsp/AIY-data-center-utility
|
c88b50e6ca952b1ba9deea5ea85ff38fd5ff42c1
|
[
"Apache-2.0"
] | 10
|
2018-12-20T10:22:11.000Z
|
2021-07-30T10:14:33.000Z
|
src/aiy/voicehat.py
|
zmsp/AIY-data-center-utility
|
c88b50e6ca952b1ba9deea5ea85ff38fd5ff42c1
|
[
"Apache-2.0"
] | 2
|
2018-12-28T22:33:51.000Z
|
2020-12-06T18:55:41.000Z
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drivers for shared functionality provided by the VoiceHat."""
import aiy._drivers._button
import aiy._drivers._led
import aiy._drivers._status_ui
# GPIO definitions (BCM)
_GPIO_BUTTON = 23
_GPIO_LED = 25
# Import LED class to expose the LED constants.
LED = aiy._drivers._led.LED
# Global variables. They are lazily initialized.
_voicehat_button = None
_voicehat_led = None
_status_ui = None
def get_button():
"""Returns a driver to the VoiceHat button.
The button driver detects edges on _GPIO_BUTTON. It can be used both
    synchronously and asynchronously.
Synchronous usage:
button = aiy.voicehat.get_button()
button.wait_for_press()
# The above function does not return until the button is pressed.
my_recognizer.recognize()
...
Asynchronous usage:
def on_button_press(_):
print('The button is pressed!')
button = aiy.voicehat.get_button()
button.on_press(on_button_press)
# The console will print 'The button is pressed!' every time the button is
# pressed.
...
# To cancel the callback, pass None:
button.on_press(None)
# Calling wait_for_press() also cancels any callback.
"""
global _voicehat_button
if not _voicehat_button:
_voicehat_button = aiy._drivers._button.Button(channel=_GPIO_BUTTON)
return _voicehat_button
def get_led():
"""Returns a driver to control the VoiceHat LED light with various animations.
led = aiy.voicehat.get_led()
# You may set any LED animation:
led.set_state(aiy.voicehat.LED.PULSE_QUICK)
led.set_state(aiy.voicehat.LED.BLINK)
# Or turn off the light but keep the driver running:
    led.set_state(aiy.voicehat.LED.OFF)
"""
global _voicehat_led
if not _voicehat_led:
_voicehat_led = aiy._drivers._led.LED(channel=_GPIO_LED)
_voicehat_led.start()
return _voicehat_led
def get_status_ui():
"""Returns a driver to control the LED via statuses.
The supported statuses are:
- "starting"
- "ready"
- "listening"
- "thinking"
- "stopping"
- "power-off"
- "error"
Optionally, a sound may be played once when the status changes to
"listening". For example, if you have a wave file at ~/ding.wav, you may set
the trigger sound by:
aiy.voicehat.get_status_ui().set_trigger_sound_wave('~/ding.wav')
To set the status, use:
aiy.voicehat.get_status_ui().set_state('starting')
aiy.voicehat.get_status_ui().set_state('thinking')
"""
global _status_ui
if not _status_ui:
_status_ui = aiy._drivers._status_ui._StatusUi()
return _status_ui
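# Hedged usage sketch (added for illustration; never called here): combines
# the three drivers above in the way their docstrings describe. It assumes
# the code runs on a Raspberry Pi with the VoiceHat attached.
def _example_button_controls_led():
    button = get_button()
    led = get_led()
    status_ui = get_status_ui()

    status_ui.set_state('ready')
    # Block until the button is pressed, then start an LED animation.
    button.wait_for_press()
    status_ui.set_state('listening')
    led.set_state(LED.BLINK)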
| 29.809091
| 82
| 0.695334
|
7953346f1d1d506e9341e5d25669ee2d5b56db8e
| 2,412
|
py
|
Python
|
models.py
|
saschalalala/paranoiabot
|
82a97aa0050bbcf04438ea0a59b828d8f6794409
|
[
"MIT"
] | null | null | null |
models.py
|
saschalalala/paranoiabot
|
82a97aa0050bbcf04438ea0a59b828d8f6794409
|
[
"MIT"
] | null | null | null |
models.py
|
saschalalala/paranoiabot
|
82a97aa0050bbcf04438ea0a59b828d8f6794409
|
[
"MIT"
] | null | null | null |
from django.db import models
from enumfields import (
EnumField,
Enum
)
# Create your models here.
class Clearance(Enum):
IR = 0
R = 1
O = 2
Y = 3
G = 4
B = 5
I = 6
V = 7
U = 8
class PlayerManager(models.Manager):
"""
    A default manager that only returns enabled players, so the filter is not
    duplicated and disabled users are kept out of queries and tests.
"""
def get_queryset(self):
return super(PlayerManager, self).get_queryset().filter(enabled=True)
class Player(models.Model):
telegram_id = models.IntegerField()
name = models.CharField(max_length=40)
gm = models.BooleanField(default=False)
clearance = EnumField(Clearance, max_length=1)
home_sector = models.CharField(max_length=3)
clone_number = models.IntegerField(default=1)
custom_player_name = models.CharField(max_length=50, blank=True)
pp = models.IntegerField(default=25)
credits = models.IntegerField(default=1000)
# For debugging / testing purposes
enabled = models.BooleanField(default=False)
objects = PlayerManager()
def get_player_name(self):
if not self.custom_player_name or self.custom_player_name == "":
return "{0.name}-{0.clearance}-{0.home_sector}-{0.clone_number}".format(self)
return self.custom_player_name
def set_custom_player_name(self, name):
self.custom_player_name = name
def increment_clearance(self):
self.clearance = Clearance(self.clearance.value + 1)
def decrement_clearance(self):
self.clearance = Clearance(self.clearance.value - 1)
def increment_clone(self):
self.clone_number += 1
def decrement_clone(self):
self.clone_number -= 1
def add_pp(self, number):
self.pp += number
def remove_pp(self, number):
self.pp -= number
def add_credits(self, number):
self.credits += number
def remove_credits(self, number):
self.credits -= number
def __str__(self):
return self.get_player_name()
class Game(models.Model):
    # Note: max_length is not a valid option for IntegerField.
    channel_id = models.IntegerField()
class Snippet(models.Model):
key = models.CharField(max_length=100, unique=True)
value = models.CharField(max_length=1000)
added_by = models.CharField(max_length=100, blank=True)
added_via = models.CharField(max_length=100, blank=True)
def __str__(self):
return "{0.key} {0.value}".format(self)
| 26.217391
| 89
| 0.671642
|
795335030b9b7c588ca14e7aa039af8d65098f6c
| 305
|
py
|
Python
|
2017/02/tax-bracket-proposals-20170224/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14
|
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2017/02/tax-bracket-proposals-20170224/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2017/02/tax-bracket-proposals-20170224/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7
|
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1E1HTsh3C69-dbAp3h15bkYnyDemVLA4oHDOorvwrcUY'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714
| 77
| 0.816393
|
7953359a28e65f7d7dc9e56b0bf213c30e3bae1f
| 729
|
py
|
Python
|
python/ch_08_coin_count_test.py
|
simonmonk/raspberrypi_cookbook_ed4
|
dc320dfae4252f70c812af1dd7739d13d09615c1
|
[
"MIT"
] | 7
|
2022-03-19T18:53:39.000Z
|
2022-03-22T13:41:30.000Z
|
python/ch_08_coin_count_test.py
|
simonmonk/raspberrypi_cookbook_ed4
|
dc320dfae4252f70c812af1dd7739d13d09615c1
|
[
"MIT"
] | null | null | null |
python/ch_08_coin_count_test.py
|
simonmonk/raspberrypi_cookbook_ed4
|
dc320dfae4252f70c812af1dd7739d13d09615c1
|
[
"MIT"
] | null | null | null |
import cv2
from imutils.video import VideoStream
from imutils import resize
vs = VideoStream(src=0).start()
while True:
img = vs.read()
img = resize(img, width=800)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.blur(img, (3, 3))
    # Detect circles (coins) with the Hough transform: dp=1, minimum distance
    # between circle centres of 20 px, Canny edge threshold 50, accumulator
    # threshold 30, and radii between 15 and 100 px.
    detected_circles = cv2.HoughCircles(img,
        cv2.HOUGH_GRADIENT, 1, 20, param1=50,
        param2=30, minRadius=15, maxRadius=100)
    print(detected_circles)
    # HoughCircles returns None when no circles are detected.
    if detected_circles is not None:
        for pt in detected_circles[0]:
            a, b, r = pt[0], pt[1], pt[2]
            print(a, b)
            cv2.circle(img, (int(a), int(b)), int(r), (0, 0, 0), 2)
cv2.imshow('image', img)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
if key == ord('x'):
break
vs.stop()
| 23.516129
| 64
| 0.589849
|
7953360c6eac3b1880ea09f8e000c7cb49ba6fbe
| 2,132
|
py
|
Python
|
recognize.py
|
disalechinmay/Face-Recognition
|
ba7aad77ae595a33f1ee2e8ac1372ed11142cd97
|
[
"Apache-2.0"
] | 1
|
2018-09-12T17:36:20.000Z
|
2018-09-12T17:36:20.000Z
|
recognize.py
|
disalechinmay/FaceHash-Face-Recognition
|
ba7aad77ae595a33f1ee2e8ac1372ed11142cd97
|
[
"Apache-2.0"
] | null | null | null |
recognize.py
|
disalechinmay/FaceHash-Face-Recognition
|
ba7aad77ae595a33f1ee2e8ac1372ed11142cd97
|
[
"Apache-2.0"
] | null | null | null |
import cv2
from imutils import face_utils
import numpy as np
import argparse
import imutils
import dlib
import math
from PIL import Image
from subprocess import call
import os
import threading
import time
import tensorflow as tf
from tensorflow import keras
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import face_recognition
#Set name of new user
currentUser = raw_input("Enter name of current user : ")
model = keras.models.load_model(currentUser + '.model')
#Importing Haar cascade and DLIB's facial landmarks detector
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Start video capture (webcam)
video = cv2.VideoCapture(0)
while(True):
ret, frame = video.read()
cv2.imshow('Original video feed', frame)
#Convert the frame to grayscale
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#Activating Haar cascade classifier to detect faces
faces = face_cascade.detectMultiScale(grayFrame, scaleFactor = 1.5, minNeighbors = 5)
for(x, y, w, h) in faces :
pillowImage = Image.fromarray(frame[y:y+h, x:x+w])
#Resizing dimensions
resizedHeight = 300
resizedWidth = 300
######
faceCropped = np.array(pillowImage.resize((resizedHeight, resizedWidth), Image.ANTIALIAS))
start_time = time.time()
encoded = face_recognition.face_encodings(faceCropped)
print("--- Encoding time: %s seconds ---" % (time.time() - start_time))
if(not len(encoded) == 0):
# Keras neural net
npratios = []
npratios.append(encoded[0])
npratios = np.array(npratios)
start_time = time.time()
kerasOutput = model.predict(npratios)
print("--- Detection time: %s seconds ---" % (time.time() - start_time))
print("\nKERAS O/P: {}".format(kerasOutput))
maxValue = kerasOutput[0][0]
for value in kerasOutput[0]:
if(maxValue < value):
maxValue = value
if(maxValue == kerasOutput[0][0] and maxValue > 0.99):
print("\nCONFIDENCE : {}".format(kerasOutput[0][0]*100))
exit(0)
if cv2.waitKey(20) & 0xFF == ord('q') :
break
video.release()
cv2.destroyAllWindows()
| 26.320988
| 92
| 0.721388
|
7953362be22a5b935a9d5e9f2141bdbc2e31298f
| 3,944
|
py
|
Python
|
elasticsearch/client/enrich.py
|
Conky5/elasticsearch-py
|
93543a7fee51c0da6e898c9155bdb5f965c5bb53
|
[
"Apache-2.0"
] | 4
|
2021-05-31T19:34:27.000Z
|
2021-06-01T18:14:31.000Z
|
elasticsearch/client/enrich.py
|
Conky5/elasticsearch-py
|
93543a7fee51c0da6e898c9155bdb5f965c5bb53
|
[
"Apache-2.0"
] | 22
|
2021-05-15T00:01:49.000Z
|
2022-02-26T00:08:00.000Z
|
elasticsearch/client/enrich.py
|
Conky5/elasticsearch-py
|
93543a7fee51c0da6e898c9155bdb5f965c5bb53
|
[
"Apache-2.0"
] | 1
|
2021-04-07T01:37:57.000Z
|
2021-04-07T01:37:57.000Z
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class EnrichClient(NamespacedClient):
@query_params()
def delete_policy(self, name, params=None, headers=None):
"""
Deletes an existing enrich policy and its enrich index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-enrich-policy-api.html>`_
:arg name: The name of the enrich policy
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"DELETE",
_make_path("_enrich", "policy", name),
params=params,
headers=headers,
)
@query_params("wait_for_completion")
def execute_policy(self, name, params=None, headers=None):
"""
Creates the enrich index for an existing enrich policy.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/execute-enrich-policy-api.html>`_
:arg name: The name of the enrich policy
        :arg wait_for_completion: Whether the request should block until
the execution is complete. Default: True
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"PUT",
_make_path("_enrich", "policy", name, "_execute"),
params=params,
headers=headers,
)
@query_params()
def get_policy(self, name=None, params=None, headers=None):
"""
Gets information about an enrich policy.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/get-enrich-policy-api.html>`_
:arg name: A comma-separated list of enrich policy names
"""
return self.transport.perform_request(
"GET", _make_path("_enrich", "policy", name), params=params, headers=headers
)
@query_params()
def put_policy(self, name, body, params=None, headers=None):
"""
Creates a new enrich policy.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/put-enrich-policy-api.html>`_
:arg name: The name of the enrich policy
:arg body: The enrich policy to register
"""
for param in (name, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path("_enrich", "policy", name),
params=params,
headers=headers,
body=body,
)
@query_params()
def stats(self, params=None, headers=None):
"""
Gets enrich coordinator statistics and information about enrich policies that
are currently executing.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/enrich-stats-api.html>`_
"""
return self.transport.perform_request(
"GET", "/_enrich/_stats", params=params, headers=headers
)
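# Hedged usage sketch (added for illustration; never called here): drives the
# EnrichClient methods above through a regular Elasticsearch client. The
# cluster address, index, field and policy names are made up.
def _example_enrich_policy_lifecycle():
    from elasticsearch import Elasticsearch

    es = Elasticsearch()  # assumes a reachable local cluster
    policy = {
        "match": {
            "indices": "users",
            "match_field": "email",
            "enrich_fields": ["first_name", "last_name"],
        }
    }
    es.enrich.put_policy(name="users-policy", body=policy)
    es.enrich.execute_policy(name="users-policy", wait_for_completion=True)
    print(es.enrich.get_policy(name="users-policy"))
    print(es.enrich.stats())
    es.enrich.delete_policy(name="users-policy")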
| 36.518519
| 106
| 0.649594
|
795337426c6e3f7084465848e1adc48e7d8cb836
| 3,397
|
py
|
Python
|
airbyte-integrations/connectors/source-paystack/unit_tests/test_streams.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 22
|
2020-08-27T00:47:20.000Z
|
2020-09-17T15:39:39.000Z
|
airbyte-integrations/connectors/source-paystack/unit_tests/test_streams.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 116
|
2020-08-27T01:11:27.000Z
|
2020-09-19T02:47:52.000Z
|
airbyte-integrations/connectors/source-paystack/unit_tests/test_streams.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 1
|
2020-09-15T06:10:01.000Z
|
2020-09-15T06:10:01.000Z
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
from http import HTTPStatus
from unittest.mock import MagicMock
import pytest
from source_paystack.streams import PaystackStream
START_DATE = "2020-08-01T00:00:00Z"
@pytest.fixture
def patch_base_class(mocker):
# Mock abstract methods to enable instantiating abstract class
mocker.patch.object(PaystackStream, "path", "v0/example_endpoint")
mocker.patch.object(PaystackStream, "primary_key", "test_primary_key")
mocker.patch.object(PaystackStream, "__abstractmethods__", set())
def test_request_params_includes_pagination_limit(patch_base_class):
stream = PaystackStream(start_date=START_DATE)
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": None}
params = stream.request_params(**inputs)
assert params == {"perPage": 200}
def test_request_params_for_includes_page_number_for_pagination(patch_base_class):
stream = PaystackStream(start_date=START_DATE)
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": {"page": 2}}
params = stream.request_params(**inputs)
assert params == {"perPage": 200, "page": 2}
def test_next_page_token_increments_page_number(patch_base_class):
stream = PaystackStream(start_date=START_DATE)
mock_response = MagicMock()
mock_response.json.return_value = {"meta": {"page": 2, "pageCount": 4}}
inputs = {"response": mock_response}
token = stream.next_page_token(**inputs)
assert token == {"page": 3}
def test_next_page_token_is_none_when_last_page_reached(patch_base_class):
stream = PaystackStream(start_date=START_DATE)
mock_response = MagicMock()
mock_response.json.return_value = {"meta": {"page": 4, "pageCount": 4}}
inputs = {"response": mock_response}
token = stream.next_page_token(**inputs)
assert token is None
def test_next_page_token_is_none_when_no_pages_exist(patch_base_class):
stream = PaystackStream(start_date=START_DATE)
mock_response = MagicMock()
mock_response.json.return_value = {"meta": {"page": 1, "pageCount": 0}}
inputs = {"response": mock_response}
token = stream.next_page_token(**inputs)
assert token is None
def test_parse_response_generates_data(patch_base_class):
stream = PaystackStream(start_date=START_DATE)
mock_response = MagicMock()
mock_response.json.return_value = {"data": [{"id": 1137850082}, {"id": 1137850097}]}
inputs = {"response": mock_response}
parsed = stream.parse_response(**inputs)
first, second = next(parsed), next(parsed)
assert first == {"id": 1137850082}
assert second == {"id": 1137850097}
@pytest.mark.parametrize(
("http_status", "should_retry"),
[
(HTTPStatus.OK, False),
(HTTPStatus.BAD_REQUEST, False),
(HTTPStatus.TOO_MANY_REQUESTS, True),
(HTTPStatus.INTERNAL_SERVER_ERROR, True),
],
)
def test_should_retry(patch_base_class, http_status, should_retry):
response_mock = MagicMock()
response_mock.status_code = http_status
stream = PaystackStream(start_date=START_DATE)
assert stream.should_retry(response_mock) == should_retry
def test_backoff_time(patch_base_class):
response_mock = MagicMock()
stream = PaystackStream(start_date=START_DATE)
expected_backoff_time = None
assert stream.backoff_time(response_mock) == expected_backoff_time
| 31.747664
| 89
| 0.732705
|
795339b57a0f47bcdcbd8eb45fb246d8d02f60a8
| 24,884
|
py
|
Python
|
src/ggrc/views/converters.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/views/converters.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/views/converters.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Main view functions for import and export pages.
This module handles all view-related functions for the import and export
pages, including the import/export API endpoints.
"""
# pylint: disable=inconsistent-return-statements
# TODO: Remove this suppression after pylint update to v.1.8.3 or higher.
import re
from functools import wraps
from logging import getLogger
from StringIO import StringIO
from datetime import datetime
from googleapiclient import errors
import flask
from flask import current_app
from flask import json
from flask import render_template
from flask import request
from werkzeug import exceptions as wzg_exceptions
from ggrc import db
from ggrc import login
from ggrc import settings
from ggrc import utils
from ggrc.app import app
from ggrc.cache import utils as cache_utils
from ggrc.cloud_api import task_queue
from ggrc.converters import base
from ggrc.converters import get_exportables
from ggrc.converters import import_helper
from ggrc.gdrive import file_actions as fa
from ggrc.models import all_models
from ggrc.models import background_task
from ggrc.models import exceptions as models_exceptions
from ggrc.models import import_export
from ggrc.notifications import job_emails
from ggrc.query import builder
from ggrc.query import exceptions as query_exceptions
from ggrc.utils import benchmark
from ggrc.utils import errors as app_errors
EXPORTABLES_MAP = {exportable.__name__: exportable for exportable
in get_exportables().values()}
IGNORE_FIELD_IN_TEMPLATE = {
"Assessment": {"evidences_file",
"end_date"},
"Audit": {"evidences_file"},
}
# pylint: disable=invalid-name
logger = getLogger(__name__)
def check_required_headers(required_headers):
"""Check required headers to the current request"""
headers_errors = []
for header, valid_values in required_headers.items():
if header not in request.headers:
headers_errors.append(
"Missing required header '{}'".format(header))
elif request.headers[header] not in valid_values:
headers_errors.append(
"Invalid header value for '{}'".format(header))
if headers_errors:
raise wzg_exceptions.BadRequest("\n".join(headers_errors))
def parse_export_request():
""" Check if request contains all required fields """
required_headers = {
"X-Requested-By": ["GGRC"],
"Content-Type": ["application/json"],
"X-export-view": ["blocks", "grid"],
}
check_required_headers(required_headers)
return request.json
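# Illustrative request shape (added for clarity; names and values are made
# up): the CSV export endpoint expects the headers validated above plus a
# JSON body such as
#
#   POST /_service/export_csv
#   X-Requested-By: GGRC
#   Content-Type: application/json
#   X-export-view: blocks
#
#   {
#     "objects": [{"object_name": "Assessment",
#                  "filters": {"expression": {}}}],
#     "exportable_objects": [],
#     "export_to": "csv",
#     "current_time": "2019-01-01 00:00"
#   }
#
# handle_export_request() below reads "objects", "exportable_objects",
# "export_to" and "current_time" from this payload; the exact "filters"
# structure is determined by the ggrc query API and is only sketched here.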
def export_file(export_to, filename, csv_string=None):
"""Export file to csv file or gdrive file"""
if export_to == "gdrive":
gfile = fa.create_gdrive_file(csv_string, filename)
headers = [('Content-Type', 'application/json'), ]
return current_app.make_response((json.dumps(gfile), 200, headers))
if export_to == "csv":
headers = [
("Content-Type", "text/csv"),
("Content-Disposition", "attachment"),
]
return current_app.make_response((csv_string, 200, headers))
raise wzg_exceptions.BadRequest(app_errors.BAD_PARAMS)
def handle_export_request_error(handle_function):
"""Decorator for handle exceptions during exporting"""
@wraps(handle_function)
def handle_wrapper(*args, **kwargs):
"""Wrapper for handle exceptions during exporting"""
try:
return handle_function(*args, **kwargs)
except query_exceptions.BadQueryException as exception:
raise wzg_exceptions.BadRequest(exception.message)
except wzg_exceptions.Unauthorized as ex:
raise wzg_exceptions.Unauthorized("%s %s" % (ex.message,
app_errors.RELOAD_PAGE))
except errors.HttpError as e:
message = json.loads(e.content).get("error").get("message")
if e.resp.code == 401:
raise wzg_exceptions.Unauthorized("%s %s" % (message,
app_errors.RELOAD_PAGE))
raise wzg_exceptions.InternalServerError(message)
except Exception as e: # pylint: disable=broad-except
logger.exception(e.message)
if settings.TESTING:
raise
raise wzg_exceptions.InternalServerError(
app_errors.INTERNAL_SERVER_ERROR.format(job_type="Export"))
return handle_wrapper
@handle_export_request_error
def handle_export_request():
"""Export request handler"""
# pylint: disable=too-many-locals
with benchmark("handle export request data"):
data = parse_export_request()
objects = data.get("objects")
exportable_objects = data.get("exportable_objects", [])
export_to = data.get("export_to")
current_time = data.get("current_time")
with benchmark("Generate CSV string"):
csv_string, object_names = make_export(objects, exportable_objects)
with benchmark("Make response."):
filename = "{}_{}.csv".format(object_names, current_time)
return export_file(export_to, filename, csv_string)
def get_csv_template(objects):
"""Make csv template"""
for object_data in objects:
class_name = object_data["object_name"]
object_class = EXPORTABLES_MAP[class_name]
ignore_fields = IGNORE_FIELD_IN_TEMPLATE.get(class_name, [])
filtered_fields = [
field for field in
import_helper.get_object_column_definitions(object_class)
if field not in ignore_fields
]
object_data["fields"] = filtered_fields
return make_export(objects)
@handle_export_request_error
def handle_export_csv_template_request():
"""Export template request handler"""
data = parse_export_request()
objects = data.get("objects")
export_to = data.get("export_to")
csv_string, object_names = get_csv_template(objects)
filename = "{}_template.csv".format(object_names)
return export_file(export_to, filename, csv_string)
def make_export(objects, exportable_objects=None, ie_job=None):
"""Make export"""
query_helper = builder.QueryHelper(objects)
ids_by_type = query_helper.get_ids()
converter = base.ExportConverter(
ids_by_type=ids_by_type,
exportable_queries=exportable_objects,
ie_job=ie_job,
)
csv_data = converter.export_csv_data()
object_names = "_".join(converter.get_object_names())
return csv_data, object_names
def check_import_file():
"""Check if imported file format and type is valid"""
if "file" not in request.files or not request.files["file"]:
raise wzg_exceptions.BadRequest(app_errors.MISSING_FILE)
csv_file = request.files["file"]
if not csv_file.filename.lower().endswith(".csv"):
raise wzg_exceptions.BadRequest(app_errors.WRONG_FILE_TYPE)
return csv_file
def parse_import_request():
""" Check if request contains all required fields """
required_headers = {
"X-Requested-By": ["GGRC"],
"X-test-only": ["true", "false"],
}
check_required_headers(required_headers)
try:
file_data = request.json
dry_run = request.headers["X-test-only"] == "true"
return dry_run, file_data
except: # pylint: disable=bare-except
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Export"))
def handle_import_request():
"""Import request handler"""
dry_run, file_data = parse_import_request()
csv_data = fa.get_gdrive_file(file_data)
return make_response(make_import(csv_data, dry_run))
def make_response(data):
"""Make response"""
response_json = json.dumps(data)
headers = [("Content-Type", "application/json")]
return current_app.make_response((response_json, 200, headers))
def make_import(csv_data, dry_run, ie_job=None):
"""Make import"""
try:
converter = base.ImportConverter(ie_job,
dry_run=dry_run,
csv_data=csv_data)
converter.import_csv_data()
return converter.get_info()
except models_exceptions.ImportStoppedException:
raise
except Exception as e: # pylint: disable=broad-except
logger.exception("Import failed: %s", e.message)
if settings.TESTING:
raise
raise wzg_exceptions.BadRequest("{} {}".format(
app_errors.INTERNAL_SERVER_ERROR.format(job_type="Import"), e.message
))
@app.route("/_background_tasks/run_export", methods=["POST"])
@background_task.queued_task
def run_export(task):
"""Run export"""
user = login.get_current_user()
ie_id = task.parameters.get("ie_id")
objects = task.parameters.get("objects")
exportable_objects = task.parameters.get("exportable_objects")
try:
ie_job = import_export.get(ie_id)
content, _ = make_export(objects, exportable_objects, ie_job)
db.session.refresh(ie_job)
if ie_job.status == "Stopped":
return utils.make_simple_response()
ie_job.status = "Finished"
ie_job.end_at = datetime.utcnow()
ie_job.content = content
db.session.commit()
job_emails.send_email(job_emails.EXPORT_COMPLETED, user.email,
ie_job.title, ie_id)
except models_exceptions.ExportStoppedException:
logger.info("Export was stopped by user.")
except Exception as e: # pylint: disable=broad-except
logger.exception("Export failed: %s", e.message)
ie_job = import_export.get(ie_id)
try:
ie_job.status = "Failed"
ie_job.end_at = datetime.utcnow()
db.session.commit()
job_emails.send_email(job_emails.EXPORT_CRASHED, user.email)
return utils.make_simple_response(e.message)
except Exception as e: # pylint: disable=broad-except
logger.exception("%s: %s", app_errors.STATUS_SET_FAILED, e.message)
return utils.make_simple_response(e.message)
return utils.make_simple_response()
@app.route("/_background_tasks/run_import_phases", methods=["POST"]) # noqa: ignore=C901
@background_task.queued_task
def run_import_phases(task):
"""Execute import phases"""
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-branches
ie_id = task.parameters.get("ie_id")
user = login.get_current_user()
try:
ie_job = import_export.get(ie_id)
csv_data = import_helper.read_csv_file(
StringIO(ie_job.content.encode("utf-8"))
)
if ie_job.status == "Analysis":
info = make_import(csv_data, True, ie_job)
db.session.rollback()
db.session.refresh(ie_job)
if ie_job.status == "Stopped":
return utils.make_simple_response()
ie_job.results = json.dumps(info)
for block_info in info:
if block_info["block_errors"] or block_info["row_errors"]:
ie_job.status = "Analysis Failed"
ie_job.end_at = datetime.utcnow()
db.session.commit()
job_emails.send_email(job_emails.IMPORT_FAILED, user.email,
ie_job.title)
return utils.make_simple_response()
for block_info in info:
if block_info["block_warnings"] or block_info["row_warnings"]:
ie_job.status = "Blocked"
db.session.commit()
job_emails.send_email(job_emails.IMPORT_BLOCKED, user.email,
ie_job.title)
return utils.make_simple_response()
ie_job.status = "In Progress"
db.session.commit()
if ie_job.status == "In Progress":
info = make_import(csv_data, False, ie_job)
if ie_job.status == "Stopped":
return utils.make_simple_response()
ie_job.results = json.dumps(info)
for block_info in info:
if block_info["block_errors"] or block_info["row_errors"]:
ie_job.status = "Analysis Failed"
ie_job.end_at = datetime.utcnow()
job_emails.send_email(job_emails.IMPORT_FAILED, user.email,
ie_job.title)
db.session.commit()
return utils.make_simple_response()
ie_job.status = "Finished"
ie_job.end_at = datetime.utcnow()
db.session.commit()
job_emails.send_email(job_emails.IMPORT_COMPLETED, user.email,
ie_job.title)
except models_exceptions.ImportStoppedException:
ie_job = import_export.get(ie_id)
job_emails.send_email(job_emails.IMPORT_STOPPED, user.email,
ie_job.title)
logger.info("Import was stopped by user.")
except Exception as e: # pylint: disable=broad-except
logger.exception(e.message)
ie_job = import_export.get(ie_id)
try:
ie_job.status = "Failed"
ie_job.end_at = datetime.utcnow()
db.session.commit()
job_emails.send_email(job_emails.IMPORT_FAILED, user.email,
ie_job.title)
return utils.make_simple_response(e.message)
except Exception as e: # pylint: disable=broad-except
logger.exception("%s: %s", app_errors.STATUS_SET_FAILED, e.message)
return utils.make_simple_response(e.message)
return utils.make_simple_response()
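# Descriptive summary (added for clarity) of the ImportExport status flow
# driven by run_import_phases() above and handle_start() below:
#
#   "Not Started" --start--> "Analysis"        (dry run of the CSV)
#   "Analysis"    --> "Analysis Failed"        on block or row errors
#   "Analysis"    --> "Blocked"                on warnings only (a second
#                                              start is required to proceed)
#   "Analysis"    --> "In Progress"            when the dry run is clean
#   "Blocked"     --start--> "In Progress"     (real import is performed)
#   "In Progress" --> "Finished" or "Analysis Failed"
#   Any phase     --> "Stopped" / "Failed"     on user stop or exception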
def init_converter_views():
"""Initialize views for import and export."""
# pylint: disable=unused-variable
# The view function trigger a false unused-variable.
@app.route("/_service/export_csv", methods=["POST"])
@login.login_required
def handle_export_csv():
"""Calls export handler"""
with benchmark("handle export request"):
return handle_export_request()
@app.route("/_service/export_csv_template", methods=["POST"])
@login.login_required
def handle_export_csv_template():
"""Calls export csv template handler"""
with benchmark("handle export csv template"):
return handle_export_csv_template_request()
@app.route("/_service/import_csv", methods=["POST"])
@login.login_required
def handle_import_csv():
"""Calls import handler"""
with benchmark("handle import request"):
return handle_import_request()
@app.route("/import")
@login.login_required
def import_view():
"""Get import view"""
return render_template("import_export/import.haml")
@app.route("/export")
@login.login_required
def export_view():
"""Get export view"""
return render_template("import_export/export.haml")
def check_import_export_headers():
"""Check headers"""
required_headers = {
"X-Requested-By": ["GGRC"],
}
check_required_headers(required_headers)
def make_import_export_response(data):
"""Make response"""
response_json = json.dumps(data)
headers = [("Content-Type", "application/json")]
return current_app.make_response((response_json, 200, headers))
def handle_start(ie_job):
"""Handle import start command"""
if ie_job.status == "Not Started":
ie_job.status = "Analysis"
elif ie_job.status == "Blocked":
ie_job.status = "In Progress"
else:
raise wzg_exceptions.BadRequest(app_errors.WRONG_STATUS)
try:
ie_job.start_at = datetime.utcnow()
db.session.commit()
run_background_import(ie_job.id)
return make_import_export_response(ie_job.log_json())
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.JOB_FAILED.format(job_type="Import")
)
def run_background_import(ie_job_id):
"""Run import job in background task."""
background_task.create_task(
name="import",
url=flask.url_for(run_import_phases.__name__),
parameters={
"ie_id": ie_job_id,
"parent": {
"type": "ImportExport",
"id": ie_job_id,
}
},
queue="ggrcImport",
queued_callback=run_import_phases,
operation_type=all_models.ImportExport.IMPORT_JOB_TYPE.lower(),
)
db.session.commit()
def handle_import_put(**kwargs):
"""Handle import put"""
command = kwargs.get("command2")
ie_id = kwargs.get("id2")
user = login.get_current_user()
if user.system_wide_role == 'No Access':
raise wzg_exceptions.Forbidden()
if not ie_id or not command or command not in ("start", "stop"):
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Import"))
try:
ie_job = import_export.get(ie_id)
except (wzg_exceptions.Forbidden, wzg_exceptions.NotFound):
raise
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Import"))
if command == 'start':
return handle_start(ie_job)
elif command == "stop":
return handle_import_stop(**kwargs)
raise wzg_exceptions.BadRequest(app_errors.BAD_PARAMS)
def handle_import_get(**kwargs):
"""Handle import get"""
return handle_get(kwargs.get("id2"), kwargs.get("command2"), "Import")
def handle_get(id2, command, job_type):
"""Handle simple get and collection get"""
check_import_export_headers()
if command:
if command != "download":
wzg_exceptions.BadRequest("Unknown command")
return handle_file_download(id2)
try:
if id2:
res = import_export.get(id2).log_json()
else:
ids = request.args.get("id__in")
res = import_export.get_jobs(job_type, ids.split(",") if ids else None)
except (wzg_exceptions.Forbidden, wzg_exceptions.NotFound):
raise
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type=job_type))
return make_import_export_response(res)
def check_import_filename(filename):
"""Check filename has no special symbols"""
spec_symbols = r'\\#?'
if re.search('[{}]+'.format(spec_symbols), filename):
raise wzg_exceptions.BadRequest(r"""
The file name should not contain special symbols \#?. Please correct
the file name and import a Google sheet or a file again.
""")
def handle_import_post(**kwargs):
""" Handle import post """
check_import_export_headers()
import_export.delete_previous_imports()
file_meta = request.json
csv_data, csv_content, filename = fa.get_gdrive_file_data(file_meta)
check_import_filename(filename)
try:
objects, results, failed = import_helper.count_objects(csv_data)
ie = import_export.create_import_export_entry(
content=csv_content,
gdrive_metadata=file_meta,
title=filename,
status="Not Started" if not failed else "Analysis Failed",
results=results)
return make_import_export_response({
"objects": objects if not failed else [],
"import_export": ie.log_json()})
except wzg_exceptions.Unauthorized:
raise
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Import"))
def handle_file_download(id2):
""" Download file """
try:
export_to = request.args.get("export_to")
ie = import_export.get(id2)
return export_file(export_to, ie.title, ie.content.encode("utf-8"))
except (wzg_exceptions.Forbidden,
wzg_exceptions.NotFound,
wzg_exceptions.Unauthorized):
raise
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Download"))
def handle_export_put(**kwargs):
"""Handle export put"""
command = kwargs.get("command2")
ie_id = kwargs.get("id2")
if not ie_id or not command or command != "stop":
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Export"))
return handle_export_stop(**kwargs)
def handle_export_get(**kwargs):
"""Handle export get"""
return handle_get(kwargs.get("id2"), kwargs.get("command2"), "Export")
def handle_export_post(**kwargs):
"""Handle export post"""
check_import_export_headers()
request_json = request.json
objects = request_json.get("objects")
exportable_objects = request_json.get("exportable_objects", [])
current_time = request.json.get("current_time")
user = login.get_current_user()
if user.system_wide_role == 'No Access':
raise wzg_exceptions.Forbidden()
if not objects or not current_time:
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Export"))
try:
filename = import_helper.get_export_filename(objects,
current_time,
exportable_objects)
ie = import_export.create_import_export_entry(
job_type="Export",
status="In Progress",
title=filename,
start_at=datetime.utcnow(),
)
run_background_export(ie.id, objects, exportable_objects)
return make_import_export_response(ie.log_json())
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Export"))
def run_background_export(ie_job_id, objects, exportable_objects):
"""Run export job in background task."""
background_task.create_task(
name="export",
url=flask.url_for(run_export.__name__),
parameters={
"ie_id": ie_job_id,
"objects": objects,
"exportable_objects": exportable_objects,
"parent": {
"type": "ImportExport",
"id": ie_job_id,
}
},
queue="ggrcImport",
queued_callback=run_export,
operation_type=all_models.ImportExport.EXPORT_JOB_TYPE.lower(),
)
db.session.commit()
def handle_delete(**kwargs):
""" Delete import_export entry """
check_import_export_headers()
try:
ie = import_export.get(kwargs["id2"])
db.session.delete(ie)
db.session.commit()
return make_import_export_response("OK")
except (wzg_exceptions.Forbidden, wzg_exceptions.NotFound):
raise
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Import/Export"))
def handle_import_stop(**kwargs):
"""Handle import stop"""
try:
ie_job = import_export.get(kwargs["id2"])
if ie_job.status in ("Analysis", "In Progress", "Blocked"):
ie_job.status = "Stopped"
ie_job.end_at = datetime.utcnow()
# Stop tasks only on non local instance
if getattr(settings, "APPENGINE_INSTANCE", "local") != "local":
stop_ie_bg_tasks(ie_job)
db.session.commit()
expire_ie_cache(ie_job)
return make_import_export_response(ie_job.log_json())
if ie_job.status == "Stopped":
raise models_exceptions.ImportStoppedException()
except wzg_exceptions.Forbidden:
raise
except models_exceptions.ImportStoppedException:
raise wzg_exceptions.BadRequest(app_errors.IMPORT_STOPPED_WARNING)
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Import"))
# Need to implement a better solution in order to identify specific
# errors like here
raise wzg_exceptions.BadRequest(app_errors.WRONG_STATUS)
def handle_export_stop(**kwargs):
"""Handle export stop"""
try:
ie_job = import_export.get(kwargs["id2"])
if ie_job.status == "In Progress":
ie_job.status = "Stopped"
# Stop tasks only on non local instance
if getattr(settings, "APPENGINE_INSTANCE", "local") != "local":
stop_ie_bg_tasks(ie_job)
db.session.commit()
expire_ie_cache(ie_job)
return make_import_export_response(ie_job.log_json())
if ie_job.status == "Stopped":
raise models_exceptions.ExportStoppedException()
except wzg_exceptions.Forbidden:
raise
except models_exceptions.ExportStoppedException:
raise wzg_exceptions.BadRequest(app_errors.EXPORT_STOPPED_WARNING)
except Exception as e:
logger.exception(e.message)
raise wzg_exceptions.BadRequest(
app_errors.INCORRECT_REQUEST_DATA.format(job_type="Export"))
raise wzg_exceptions.BadRequest(app_errors.WRONG_STATUS)
def expire_ie_cache(ie_job):
"""Expire export status cache to force DB request."""
cache_manager = cache_utils.get_cache_manager()
cache_key = cache_utils.get_ie_cache_key(ie_job)
cache_manager.cache_object.memcache_client.delete(cache_key)
def get_ie_bg_tasks(ie_job):
"""Get BackgroundTasks related to ImportExport job."""
return all_models.BackgroundTask.query.join(
all_models.BackgroundOperation
).filter(
all_models.BackgroundOperation.object_type == ie_job.type,
all_models.BackgroundOperation.object_id == ie_job.id,
)
def stop_ie_bg_tasks(ie_job):
"""Stop background tasks related to ImportExport job."""
bg_tasks = get_ie_bg_tasks(ie_job)
for task in bg_tasks:
try:
task_queue.stop_bg_task(task.name, "ggrcImport")
except errors.HttpError as err:
if err.resp.status == 404:
logger.warning(
"Task '%s' wasn't found in queue. It will be stopped.",
task.name
)
else:
raise err
task.status = all_models.BackgroundTask.STOPPED_STATUS
| 33.855782
| 89
| 0.702258
|
795339d53e58abe73283674189e762c74508a449
| 3,579
|
py
|
Python
|
models/CausalNormClassifier.py
|
hyperconnect/LADE
|
cfe96b7ca6520f3410d4cae9cc10919e6114bbb9
|
[
"BSD-3-Clause"
] | 78
|
2020-11-30T09:46:01.000Z
|
2022-03-30T02:42:48.000Z
|
models/CausalNormClassifier.py
|
hyperconnect/LADE
|
cfe96b7ca6520f3410d4cae9cc10919e6114bbb9
|
[
"BSD-3-Clause"
] | 18
|
2020-12-30T10:39:11.000Z
|
2022-03-21T07:27:27.000Z
|
models/CausalNormClassifier.py
|
hyperconnect/LADE
|
cfe96b7ca6520f3410d4cae9cc10919e6114bbb9
|
[
"BSD-3-Clause"
] | 8
|
2020-12-02T15:41:23.000Z
|
2022-02-26T11:57:37.000Z
|
import torch
import torch.nn as nn
from utils import *
from os import path
import math
class Causal_Norm_Classifier(nn.Module):
def __init__(self, num_classes=1000, feat_dim=2048, use_effect=True, num_head=2, tau=16.0, alpha=3.0, gamma=0.03125, *args):
super(Causal_Norm_Classifier, self).__init__()
self.weight = nn.Parameter(torch.Tensor(num_classes, feat_dim).cuda(), requires_grad=True)
self.scale = tau / num_head # 16.0 / num_head
self.norm_scale = gamma # 1.0 / 32.0
self.alpha = alpha # 3.0
self.num_head = num_head
self.head_dim = feat_dim // num_head
self.use_effect = use_effect
self.reset_parameters(self.weight)
# self.relu = nn.ReLU(inplace=True)
def reset_parameters(self, weight):
stdv = 1. / math.sqrt(weight.size(1))
weight.data.uniform_(-stdv, stdv)
def forward(self, x, label, embed):
# calculate capsule normalized feature vector and predict
normed_w = self.multi_head_call(self.causal_norm, self.weight, weight=self.norm_scale)
normed_x = self.multi_head_call(self.l2_norm, x)
y = torch.mm(normed_x * self.scale, normed_w.t())
# remove the effect of confounder c during test
if (not self.training) and self.use_effect:
self.embed = torch.from_numpy(embed).view(1, -1).to(x.device)
normed_c = self.multi_head_call(self.l2_norm, self.embed)
head_dim = x.shape[1] // self.num_head
x_list = torch.split(normed_x, head_dim, dim=1)
c_list = torch.split(normed_c, head_dim, dim=1)
w_list = torch.split(normed_w, head_dim, dim=1)
output = []
for nx, nc, nw in zip(x_list, c_list, w_list):
cos_val, sin_val = self.get_cos_sin(nx, nc)
y0 = torch.mm((nx - cos_val * self.alpha * nc) * self.scale, nw.t())
output.append(y0)
y = sum(output)
return y, None
def get_cos_sin(self, x, y):
cos_val = (x * y).sum(-1, keepdim=True) / torch.norm(x, 2, 1, keepdim=True) / torch.norm(y, 2, 1, keepdim=True)
sin_val = (1 - cos_val * cos_val).sqrt()
return cos_val, sin_val
def multi_head_call(self, func, x, weight=None):
assert len(x.shape) == 2
x_list = torch.split(x, self.head_dim, dim=1)
if weight:
y_list = [func(item, weight) for item in x_list]
else:
y_list = [func(item) for item in x_list]
assert len(x_list) == self.num_head
assert len(y_list) == self.num_head
return torch.cat(y_list, dim=1)
def l2_norm(self, x):
normed_x = x / torch.norm(x, 2, 1, keepdim=True)
return normed_x
def capsule_norm(self, x):
        norm = torch.norm(x.clone(), 2, 1, keepdim=True)
normed_x = (norm / (1 + norm)) * (x / norm)
return normed_x
def causal_norm(self, x, weight):
        norm = torch.norm(x, 2, 1, keepdim=True)
normed_x = x / (norm + weight)
return normed_x
def create_model(feat_dim, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, use_effect=True, num_head=None, tau=None, alpha=None, gamma=None, *args):
print('Loading Causal Norm Classifier with use_effect: {}, num_head: {}, tau: {}, alpha: {}, gamma: {}.'.format(str(use_effect), num_head, tau, alpha, gamma))
clf = Causal_Norm_Classifier(num_classes, feat_dim, use_effect=use_effect, num_head=num_head, tau=tau, alpha=alpha, gamma=gamma)
return clf
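# Illustrative usage sketch, not part of the original file. It shows one way
# the classifier above might be exercised; the batch size, the dummy
# moving-average feature `embed`, and the assumption that a CUDA device is
# available (the weight is allocated with .cuda()) are all hypothetical.
def _example_forward_pass():
    import numpy as np
    clf = create_model(feat_dim=2048, num_classes=1000, use_effect=True,
                       num_head=2, tau=16.0, alpha=3.0, gamma=0.03125)
    clf.eval()  # the de-confounding branch in forward() only runs outside training
    x = torch.randn(4, 2048).cuda()                  # batch of 4 feature vectors
    embed = np.random.randn(2048).astype("float32")  # stand-in confounder embedding
    logits, _ = clf(x, None, embed)
    return logits.shape                              # expected: torch.Size([4, 1000])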
| 42.607143
| 180
| 0.621403
|
795339f29dc25eabcba954c0ee0dac74019b153e
| 8,170
|
py
|
Python
|
dmarc_imap.py
|
sirio81/dmarc-monitoring
|
372d66889246786c22b06bc9d777bd39b104c0d9
|
[
"MIT"
] | null | null | null |
dmarc_imap.py
|
sirio81/dmarc-monitoring
|
372d66889246786c22b06bc9d777bd39b104c0d9
|
[
"MIT"
] | null | null | null |
dmarc_imap.py
|
sirio81/dmarc-monitoring
|
372d66889246786c22b06bc9d777bd39b104c0d9
|
[
"MIT"
] | null | null | null |
import os
import imaplib
import email
import email.header
import re
import argparse
# Should we attempt validation of DKIM signatures on report emails?
# try:
# import dkim
# import dnspython
# DKIM_CHECK = True
# except ImportError:
# DKIM_CHECK = False
__all__ = ['ReportDownloader', 'IMAPException']
# DMARC report names are of the form:
# receiverdomain!senderdomain!startt!endt.zip
pattern = r'''^(?:[A-Za-z0-9.\-]+!){2} # any sequence of letters, numbers, dashes and dots,
                                       # at the beginning of the string, with an exclamation mark as the last character
              [0-9]{10}!               # 10 digits of the timestamp followed by an exclamation mark
              [0-9]{10}.               # 10 digits of the timestamp followed by a dot
              (xml|zip|gz|xml.gz)$     # any of the following extensions at the end of the string'''
RUA_NAME_FORMAT = re.compile(pattern,re.X)
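# Illustrative only (added; not part of the original script): two hypothetical
# attachment names showing what RUA_NAME_FORMAT is meant to accept and reject.
def _example_rua_names():
    ok = "google.com!example.org!1546300800!1546387199.zip"   # receiver!sender!start!end.zip
    bad = "random-attachment.zip"                             # lacks the report name structure
    return bool(RUA_NAME_FORMAT.match(ok)), bool(RUA_NAME_FORMAT.match(bad))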
class IMAPException(Exception):
pass
class ReportDownloader(object):
def __init__(self, email_address, password, imap_url, dmarc_label=None, unread_only=True):
self.email_address = email_address
self.email_password = password
self.imap_url = imap_url
self.dmarc_label = dmarc_label
self._search_param = "UNSEEN" if unread_only else "ALL"
self._logged_in = False
self._mailbox = imaplib.IMAP4_SSL(self.imap_url)
def login(self):
if not self._logged_in:
try:
rv, data = self._mailbox.login(self.email_address, self.email_password)
if rv != "OK":
raise IMAPException("Error logging in!")
except imaplib.IMAP4.error as e:
print "ERROR: Login Failed! " + e.message
raise IMAPException("Fatal Error logging in!")
self._logged_in = True
print "INFO: Logged in to IMAP server successfully."
else:
pass
def download(self, destination_folder='./reports'):
        # Keep track of reports we downloaded this session:
reports_downloaded = []
# Allow skipping of the extra call to login():
if not self._logged_in:
self.login()
# Create the output directory if it doesn't exist:
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# If we need to narrow down emails searched by a label:
if self.dmarc_label is not None:
rv, data = self._mailbox.select(self.dmarc_label)
if rv != "OK":
print "ERROR: Problem selecting label!"
raise IMAPException("Error selecting label!")
else:
self._mailbox.select()
# Search for all emails matching the read/unread criteria:
rv, data = self._mailbox.search(None, self._search_param)
if rv != "OK":
print "ERROR: Problem searching for emails!"
raise IMAPException("Error searching for emails!")
# Iterate through the emails, downloading any zip or gz attachments:
email_numbers = data[0].split()
n_expected = len(email_numbers)
n_found = 0
n_new = 0
print "INFO: Scanning %d email%s." % (n_expected, "" if n_expected == 1 else "s")
for num in email_numbers:
found = False
rv, data = self._mailbox.fetch(num, '(RFC822)')
if rv != 'OK':
print "ERROR: Problem getting a message!"
raise IMAPException("Failed to fetch a message!")
# Turn the message into a string, and search for attachments:
m = email.message_from_string(data[0][1])
message_subject = unicode(email.header.decode_header(m['Subject'])[0][0])
#
# FIXME: At this point some checking could be done to validate this is actually
# a *genuine* DMARC report and not fake or spam. Unfortunately not all report
# providers sign their reporting emails . . .
#
# if DKIM_CHECK:
# valid = dkim.dkim_verify(data[0][1])
# if not valid:
# continue
#
attachment_types = ['application/zip', 'application/gzip']
if (m.get_content_maintype() == 'multipart' or m.get_content_type() in attachment_types):
for part in m.walk():
is_attachment = part.get('Content-Disposition', '').startswith("attachment")
is_inline_attachment = part.get_content_type() in attachment_types
if is_attachment or is_inline_attachment:
filename = part.get_filename()
# Process the attachment only if named as expected (RFC 7489, Section 7.2.1.1):
if RUA_NAME_FORMAT.match(filename):
n_found += 1
found = True
file_path = os.path.join(destination_folder, filename)
# Download the attachment only if it doesn't already exist:
file_exists = os.path.isfile(file_path)
duplicate_name_this_session = filename in reports_downloaded
if not file_exists:
n_new += 1
fp = open(file_path, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
# Assert only one report per email:
reports_downloaded.append(filename)
break
elif duplicate_name_this_session:
# If there's already a file with this name and we downloaded it *this session*,
# it's likely that it's not the same report but a different one with the same name.
# Google does this if the DNS record policy published changes during a reporting window.
print "WARN: Message (%s) contained a DMARC report with a duplicate name!" % message_subject
print "INFO: Duplicate report names could indicate the DNS record changed during a reporting window."
print "INFO: If this is the case, download this report by hand."
break
# If we expect to only see DMARC emails, note when an email contains no report:
if self.dmarc_label is not None and not found:
print "INFO: Message (%s) contained no DMARC report." % message_subject
# Finished trawling the emails: did we miss anything?
print "INFO: Examined %d message%s, found %d DMARC report%s." % (n_expected, "" if n_expected == 1 else "s", n_found, "" if n_found == 1 else "s")
print "INFO: Downloaded %d new DMARC report%s." % (n_new, "" if n_new == 1 else "s")
if __name__ == "__main__":
# Allow specification of parameters at runtime:
options = argparse.ArgumentParser(description="Download DMARC reports from an IMAP server.")
options.add_argument("-e", "--email", help="email address to access", required=True)
options.add_argument("-pf", "--pwdfile", help="text file containing the IMAP password", required=True)
options.add_argument("-s", "--server", help="what IMAP server to connect to (default to Google)", default='imap.gmail.com')
options.add_argument("-a", "--all", help="read all messages, not just unread", action="store_true")
options.add_argument("-l", "--label", help="the label DMARC messages are stored under", default=None)
args = options.parse_args()
# Download reports, to the default directory:
password = None
with open(args.pwdfile, 'r') as password_file:
password = password_file.readline().strip()
downloader = ReportDownloader(args.email, password, args.server, dmarc_label=args.label, unread_only=not args.all)
downloader.download()
| 50.122699
| 154
| 0.586291
|
79533a2d2a1cac13cbc89cced298e81a245728f7
| 7,289
|
py
|
Python
|
grtoolkit/Circuits/Phasors/__init__.py
|
ZenosParadox/grtoolkit
|
2e34f151a78f57864e39e572c221ca4b73e48bb7
|
[
"MIT"
] | 3
|
2020-02-02T14:33:30.000Z
|
2020-07-29T00:27:46.000Z
|
grtoolkit/Circuits/Phasors/__init__.py
|
ZenosParadox/grtoolkit
|
2e34f151a78f57864e39e572c221ca4b73e48bb7
|
[
"MIT"
] | null | null | null |
grtoolkit/Circuits/Phasors/__init__.py
|
ZenosParadox/grtoolkit
|
2e34f151a78f57864e39e572c221ca4b73e48bb7
|
[
"MIT"
] | 2
|
2020-02-02T14:33:32.000Z
|
2022-03-21T14:33:34.000Z
|
from sympy import pi
from math import *
from cmath import *
# General expression for sinusoid:
# v(t) = Vm*sin(w*t + phi)
# where,
# phi = phase
# If comparing two sinusoids phase1 != phase2 then "out of phase".
# One sinusoid leads or lags by phase in radians or degrees.
# If phase difference = 0 then "in phase"
# Trigonometric identities:
# sin(A +/- B) = sinAcosB +/- cosAsinB
# cos(A +/- B) = cosAcosB -/+ sinAsinB
# With these identities, it is easy to show that:
# sin(wt +/- 180 degrees) = -sin(wt)
# cos(wt +/- 180 degrees) = -cos(wt)
# sin(wt +/- 90 degrees) = +/-cos(wt)
# cos(wt +/- 90 degrees) = -/+sin(wt)
# Adding
# A*cos(w*t) + B*sin(w*t) = C*cos(w*t-phase)
# C = (A**2+B**2)**(1/2)
# phase = tan-1(B/A)
# Example:
# 3*cos(wt) - 4*sin(w*t) = 5*cos(wt + 53.1)
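# Quick numeric sanity check (added for illustration; not in the original
# module) of the identity above: A*cos(wt) + B*sin(wt) == C*cos(wt - phase)
# with C = sqrt(A**2 + B**2) and phase = atan2(B, A). The defaults reuse the
# A=3, B=-4 example; wt=0.4 rad is an arbitrary sample point.
def _check_sinusoid_sum(A=3.0, B=-4.0, wt=0.4):
    C = (A ** 2 + B ** 2) ** 0.5    # 5.0 for the 3 / -4 example
    phase = atan2(B, A)             # about -53.13 degrees, in radians
    lhs = A * cos(wt) + B * sin(wt)
    rhs = C * cos(wt - phase)
    return abs(lhs - rhs) < 1e-9    # True if the identity holds numerically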
def T(w):
"""
Period
w = angular frequency
Theory:
v(t + T) = v(t)
"""
T = 2 * pi / w
return T
def f(w):
"""Cyclic frequency"""
    # Cyclic frequency is the reciprocal of the period: f = 1/T = w/(2*pi).
    return 1 / T(w)
def w(f):
"""Angular frequency"""
return 2*pi*f
# A complex number can be written in rectangular form as
# z = x + jy RECTANGULAR FORM
# z = r*angle(phase) POLAR FORM
# z = r*exp(j*phase) EXPONENTIAL FORM
# j = (-1)**.5
# r = (x**2 + y**2)**(1/2)
# phi = tan-1(y/x)
# x = r*cos(phi)
# y = r*sin(phi)
# z = x + jy = r*angle(phi) = r*(cos(phi) + j*sin(phi))
# Addition and subtraction are better performed in rectangular form
# Multiplication and division are better done in polar form
# Addition:
# z1 + z2 = (x1 + x2) + j(y1 + y2)
# Subtraction
# z1 - z2 = (x1 - x2) + j(y1 - y2)
# Multiplication:
# z1*z2 = r1*r2 ang(phi1 + phi2)
# Division:
# z1/z2 = r1/r2 ang(phi1-phi2)
# Reciprocal
# 1/z = 1/r ang(-phi)
# Square Root
# sqrt(z) = sqrt(r) ang(phi/2)
# Complex Conjugate
# z* = x-j*y = r ang(-phi) = r*exp(-j*phi)
# Note:
# 1/j = -j
# exp(+/-j*phi) = cos(phi) +/- j*sin(phi)
# cos(phi) = Re( exp(j*phi))
# sin(phi) = Im(exp(j*phi))
# v(t) = Re(Vm*cos(w*t+phi))
# V is used as the phasor representation of v(t)
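# Small illustrative helper (added; not in the original file): round-trips a
# complex number through the rectangular and polar forms described above with
# cmath's polar()/rect(), and checks the polar multiplication rule
# (magnitudes multiply, angles add). The sample value 3 + 4j is arbitrary.
def _demo_complex_forms(z=3 + 4j):
    r, phi = polar(z)        # r = 5.0, phi = atan2(4, 3)
    back = rect(r, phi)      # back to rectangular form, ~3 + 4j
    r2, phi2 = polar(z * z)  # product of z with itself in polar form
    return (abs(back - z) < 1e-9,
            abs(r2 - r * r) < 1e-9,
            abs(phi2 - 2 * phi) < 1e-9)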
class signal:
"""
Types: "rect" "polar_rad" "polar_deg"
https://docs.python.org/2/reference/datamodel.html#emulating-numeric-types
"""
def __init__(self, val=complex(0,0), format="rect"):
"""
Types: "rect" "polar_rad" "polar_deg" "wave_deg" "wave_rad"
Input:
rect: val = complex(0,0)
polar_rad = (A,phi_rad)
polar_deg = (A, phi_deg)
wave = (A, "cos", w, phi) or (A, "sin", w, phi)
"""
if format == "rect":
self.rect = val
elif format == "polar_rad":
self.polar_rad = val
elif format == "polar_deg":
self.polar_deg = val
elif "wave" in format:
self.wave_original = val
self.wave_original_type = format
phasor = ()
val = list(val)
if "deg" in format:
val[3] = radians(val[3])
if val[1] == "cos":
phasor = self.__cos2Phasor(val[0], val[3])
elif val[1] == "sin":
phasor = self.__sin2Phasor(val[0], val[3])
else:
                raise ValueError('Not a valid sinusoid. Format must be (A, "cos", w, phi) or (A, "sin", w, phi)')
self.polar_rad = (phasor[0], phasor[1])
else:
            raise ValueError('format must be: "rect" "polar_rad" "polar_deg" "wave_deg" "wave_rad"')
@property
def rect(self):
return self._rect
@rect.setter
def rect(self, val):
self._rect = val
self._polar_rad = self.__rect2polar(self._rect)
self._polar_deg = self.__polar_deg_view()
@property
def polar_rad(self):
return self._polar_rad
@polar_rad.setter
def polar_rad(self, val):
self._polar_rad = val
self._polar_deg = self.__polar_deg_view()
self._rect = self.__polar2rect(self._polar_rad)
def __polar_deg_view(self):
"""
Does not return actual polar as actual polar needs to be in radians.
For viewing ONLY.
"""
polar = self._polar_rad
polar_fix = list()
polar_fix.append(polar[0])
polar_fix.append(degrees(polar[1]))
return polar_fix
@property
def polar_deg(self):
return self._polar_deg
@polar_deg.setter
def polar_deg(self, val):
self._polar_deg = val
self._polar_rad = self.__polar_rad_view()
self._rect = self.__polar2rect(self._polar_rad)
@property
def sinusoid(self):
return self._sinusoid
@sinusoid.setter
def sinusoid(self, val):
self._sinusoid = val
def __polar2rect(self, r,phi=0):
"""
Output: class <complex>
"""
if isinstance(r,tuple) or isinstance(r,list):
return rect(r[0],r[1])
return rect(r,phi)
def __rect2polar(self,z):
"""
Polar cannot do math.
Output: class <tuple>
"""
return polar(z)
def __polar_rad_view(self):
"""
Does not return actual polar as actual polar needs to be in radians.
For viewing ONLY.
"""
polar = self._polar_deg
polar_fix = list()
polar_fix.append(polar[0])
polar_fix.append(radians(polar[1]))
return polar_fix
def __cos2Phasor(self, A,phi):
"""
Format: A*cos(wt+phi)
Output: [A, phi] which represents polar form A angle(phi)
"""
if A < 0:
return self.__cos2Phasor(-A, phi+radians(180))
else:
return A, phi
def __sin2Phasor(self, A, phi):
if A < 0:
return self.__sin2Phasor(-A, phi+radians(180))
else:
return A, phi-radians(90)
def sin_wave(self, format="rad"):
if format == "deg":
return f"{self._polar_rad[0]} sin (wt * {degrees(self.polar_rad[1]-radians(90))})"
return f"{self._polar_rad[0]} sin (wt * {self.polar_rad[1]-radians(90)})"
def cos_wave(self, format="rad"):
if format == "deg":
return f"{self._polar_rad[0]} cos (wt * {degrees(self.polar_rad[1])})"
return f"{self._polar_rad[0]} cos (wt * {self.polar_rad[1]})"
def __add__(self,other):
return signal(self._rect + other._rect)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self,other):
return signal(self._rect - other._rect)
def __rsub__(self, other):
#Doesn't work?
return other - self._rect
def __mul__(self,other):
return signal(self._rect * other._rect)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self,other):
return signal(self._rect / other._rect)
def __rtruediv__(self,other):
# return signal(other / self._rect)
return other / self._rect
def __pow__(self,other):
# return signal(other / self._rect)
return signal(self.rect**(other))
def sqrt(self):
return signal(self.rect**(1/2))
## NOTE:
# DERIVATIVE
#   dv/dt          ==  jwV
#   time domain        frequency domain
# INTEGRAL
#   integrate(v,t) ==  V/jw
#   time domain        frequency domain
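# Numeric illustration of the note above (added; not in the original module):
# for v(t) = Vm*cos(w*t + phi), whose phasor is V = Vm*exp(j*phi), a finite
# difference of v(t) should agree with Re(j*w*V*exp(j*w*t)). Vm, phi, w_val,
# t and h below are arbitrary sample values.
def _check_derivative_phasor(Vm=5.0, phi=0.6, w_val=100.0, t=2e-3, h=1e-7):
    V = Vm * exp(1j * phi)                                    # phasor of v(t)
    v = lambda tt: (V * exp(1j * w_val * tt)).real            # v(t) = Re(V*exp(jwt))
    dv_dt = (v(t + h) - v(t - h)) / (2 * h)                   # central difference
    dv_dt_phasor = (1j * w_val * V * exp(1j * w_val * t)).real
    return abs(dv_dt - dv_dt_phasor) < 1e-3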
if __name__ == "__main__":
z1 = signal((40,50), format="polar_rad")
z2 = signal((20,-30), format="polar_deg")
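    # Added illustration (not in the original file): the arithmetic operators on
    # `signal` work on the rectangular form, so the two example signals above
    # can be combined and inspected in either representation.
    z3 = z1 + z2
    z4 = z1 * z2
    print(z3.polar_deg)  # magnitude and angle (in degrees) of the sum
    print(z4.rect)       # product in rectangular form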
| 26.60219
| 101
| 0.562491
|
79533a7f16d953e52c9ff412a06ddcf11be2153a
| 157,885
|
py
|
Python
|
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/operations/_replication_protected_items_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/operations/_replication_protected_items_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/operations/_replication_protected_items_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ReplicationProtectedItemsOperations(object):
"""ReplicationProtectedItemsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicessiterecovery.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_replication_protection_containers(
self,
fabric_name, # type: str
protection_container_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ReplicationProtectedItemCollection"]
"""Gets the list of Replication protected items.
Gets the list of ASR replication protected items in the protection container.
:param fabric_name: Fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReplicationProtectedItemCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItemCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItemCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_replication_protection_containers.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItemCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_replication_protection_containers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems'} # type: ignore
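    # Illustrative caller-side sketch (added; not generated code). The client,
    # resource names and input object below are hypothetical placeholders: the
    # operation above returns an ItemPaged iterator, while the begin_* methods
    # further down return an LROPoller whose .result() blocks until the ARM
    # long-running operation completes.
    #
    #   client = SiteRecoveryManagementClient(credential, subscription_id)
    #   ops = client.replication_protected_items
    #   for item in ops.list_by_replication_protection_containers("fabric1", "container1"):
    #       print(item.name)
    #   poller = ops.begin_create("fabric1", "container1", "vm1", enable_protection_input)
    #   protected_item = poller.result()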
def get(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ReplicationProtectedItem"
"""Gets the details of a Replication protected item.
Gets the details of an ASR replication protected item.
:param fabric_name: Fabric unique name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReplicationProtectedItem, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def _create_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
input, # type: "_models.EnableProtectionInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'EnableProtectionInput')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def begin_create(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
input, # type: "_models.EnableProtectionInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Enables protection.
The operation to create an ASR replication protected item (Enable replication).
:param fabric_name: Name of the fabric.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: A name for the replication protected item.
:type replicated_protected_item_name: str
:param input: Enable Protection Input.
:type input: ~azure.mgmt.recoveryservicessiterecovery.models.EnableProtectionInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
input=input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def _purge_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
# Construct URL
url = self._purge_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_purge_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def begin_purge(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Purges protection.
The operation to delete or purge a replication protected item. This operation will force delete
the replication protected item. Use the remove operation on replication protected item to
perform a clean disable replication for the item.
:param fabric_name: Fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._purge_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_purge.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def _update_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
update_protection_input, # type: "_models.UpdateReplicationProtectedItemInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_protection_input, 'UpdateReplicationProtectedItemInput')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def begin_update(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
update_protection_input, # type: "_models.UpdateReplicationProtectedItemInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Updates the replication protected item settings.
The operation to update the recovery settings of an ASR replication protected item.
:param fabric_name: Fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param update_protection_input: Update protection input.
:type update_protection_input: ~azure.mgmt.recoveryservicessiterecovery.models.UpdateReplicationProtectedItemInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
update_protection_input=update_protection_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def _add_disks_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
add_disks_input, # type: "_models.AddDisksInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._add_disks_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(add_disks_input, 'AddDisksInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_add_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/addDisks'} # type: ignore
def begin_add_disks(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
add_disks_input, # type: "_models.AddDisksInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Add disk(s) for protection.
Operation to add disks(s) to the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param add_disks_input: Add disks input.
:type add_disks_input: ~azure.mgmt.recoveryservicessiterecovery.models.AddDisksInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
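A minimal usage sketch (illustrative only, not part of the generated operation; it assumes
``client`` is an already constructed SiteRecoveryManagementClient exposing this operations
group as ``replication_protected_items`` and that ``add_disks_input`` was built elsewhere)::

    poller = client.replication_protected_items.begin_add_disks(
        "fabric1", "container1", "protected-item1", add_disks_input)
    protected_item = poller.result()  # blocks until the long-running operation finishes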
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._add_disks_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
add_disks_input=add_disks_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_add_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/addDisks'} # type: ignore
def _apply_recovery_point_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
apply_recovery_point_input, # type: "_models.ApplyRecoveryPointInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._apply_recovery_point_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(apply_recovery_point_input, 'ApplyRecoveryPointInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_apply_recovery_point_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/applyRecoveryPoint'} # type: ignore
def begin_apply_recovery_point(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
apply_recovery_point_input, # type: "_models.ApplyRecoveryPointInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Change or apply recovery point.
The operation to change the recovery point of a failed over replication protected item.
:param fabric_name: The ARM fabric name.
:type fabric_name: str
:param protection_container_name: The protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: The replicated protected item name.
:type replicated_protected_item_name: str
:param apply_recovery_point_input: The ApplyRecoveryPointInput.
:type apply_recovery_point_input: ~azure.mgmt.recoveryservicessiterecovery.models.ApplyRecoveryPointInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
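Illustrative sketch showing a custom polling interval (``client`` and
``apply_recovery_point_input`` are assumptions, not names defined in this module)::

    poller = client.replication_protected_items.begin_apply_recovery_point(
        fabric_name, protection_container_name, replicated_protected_item_name,
        apply_recovery_point_input, polling_interval=30)  # poll every 30s if no Retry-After header
    item = poller.result()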
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._apply_recovery_point_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
apply_recovery_point_input=apply_recovery_point_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_apply_recovery_point.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/applyRecoveryPoint'} # type: ignore
def _failover_cancel_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._failover_cancel_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_failover_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/failoverCancel'} # type: ignore
def begin_failover_cancel(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute cancel failover.
Operation to cancel the failover of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
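Illustrative sketch using ``polling=False`` (``client`` is an assumed, already constructed
SiteRecoveryManagementClient)::

    poller = client.replication_protected_items.begin_failover_cancel(
        fabric_name, protection_container_name, replicated_protected_item_name, polling=False)
    initial = poller.result()  # NoPolling: returns only what the initial 200/202 response carried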
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._failover_cancel_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_failover_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/failoverCancel'} # type: ignore
def _failover_commit_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._failover_commit_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_failover_commit_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/failoverCommit'} # type: ignore
def begin_failover_commit(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute commit failover.
Operation to commit the failover of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
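Illustrative sketch of saving and later resuming the poller with a continuation token
(``client`` is an assumed, already constructed SiteRecoveryManagementClient)::

    poller = client.replication_protected_items.begin_failover_commit(
        fabric_name, protection_container_name, replicated_protected_item_name)
    token = poller.continuation_token()  # persist this string somewhere durable
    resumed = client.replication_protected_items.begin_failover_commit(
        fabric_name, protection_container_name, replicated_protected_item_name,
        continuation_token=token)
    item = resumed.result()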
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._failover_commit_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_failover_commit.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/failoverCommit'} # type: ignore
def _planned_failover_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
failover_input, # type: "_models.PlannedFailoverInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._planned_failover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(failover_input, 'PlannedFailoverInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_planned_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/plannedFailover'} # type: ignore
def begin_planned_failover(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
failover_input, # type: "_models.PlannedFailoverInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute planned failover.
Operation to initiate a planned failover of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param failover_input: Planned failover input.
:type failover_input: ~azure.mgmt.recoveryservicessiterecovery.models.PlannedFailoverInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
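Illustrative sketch of inspecting the poller while the job runs (``client`` and
``failover_input`` are assumptions, not names defined in this module)::

    poller = client.replication_protected_items.begin_planned_failover(
        fabric_name, protection_container_name, replicated_protected_item_name, failover_input)
    print(poller.status())  # e.g. "InProgress" while the planned failover job is running
    poller.wait()           # block without retrieving the deserialized result
    item = poller.result()  # the ReplicationProtectedItem once the operation completes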
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._planned_failover_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
failover_input=failover_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_planned_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/plannedFailover'} # type: ignore
def _delete_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
disable_protection_input, # type: "_models.DisableProtectionInput"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(disable_protection_input, 'DisableProtectionInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/remove'} # type: ignore
def begin_delete(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
disable_protection_input, # type: "_models.DisableProtectionInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Disables protection.
The operation to disable replication on a replication protected item. This will also remove the
item.
:param fabric_name: Fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param disable_protection_input: Disable protection input.
:type disable_protection_input: ~azure.mgmt.recoveryservicessiterecovery.models.DisableProtectionInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
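Illustrative sketch (``client`` and ``disable_protection_input`` are assumed to exist); the
poller carries no body, so ``result()`` returns ``None``::

    poller = client.replication_protected_items.begin_delete(
        fabric_name, protection_container_name, replicated_protected_item_name,
        disable_protection_input)
    poller.wait()  # just wait for the disable/remove job to finish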
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
disable_protection_input=disable_protection_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/remove'} # type: ignore
def _remove_disks_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
remove_disks_input, # type: "_models.RemoveDisksInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._remove_disks_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(remove_disks_input, 'RemoveDisksInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_remove_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/removeDisks'} # type: ignore
def begin_remove_disks(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
remove_disks_input, # type: "_models.RemoveDisksInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Removes disk(s).
Operation to remove disk(s) from the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param remove_disks_input: Remove disks input.
:type remove_disks_input: ~azure.mgmt.recoveryservicessiterecovery.models.RemoveDisksInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
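Illustrative sketch using the ``cls`` callback to also capture the HTTP status code (every name
other than the operation itself is an assumption)::

    def keep_status(pipeline_response, deserialized, headers):
        return deserialized, pipeline_response.http_response.status_code

    poller = client.replication_protected_items.begin_remove_disks(
        fabric_name, protection_container_name, replicated_protected_item_name,
        remove_disks_input, cls=keep_status)
    item, status_code = poller.result()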
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._remove_disks_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
remove_disks_input=remove_disks_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_remove_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/removeDisks'} # type: ignore
def _repair_replication_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._repair_replication_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_repair_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/repairReplication'} # type: ignore
def begin_repair_replication(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Resynchronize or repair replication.
The operation to start resynchronize/repair replication for a replication protected item
requiring resynchronization.
:param fabric_name: The name of the fabric.
:type fabric_name: str
:param protection_container_name: The name of the container.
:type protection_container_name: str
:param replicated_protected_item_name: The name of the replication protected item.
:type replicated_protected_item_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
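Illustrative sketch (``client`` is an assumed, already constructed SiteRecoveryManagementClient
scoped to the target vault)::

    poller = client.replication_protected_items.begin_repair_replication(
        fabric_name, protection_container_name, replicated_protected_item_name)
    item = poller.result()  # returns once the resynchronize/repair job has been processed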
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._repair_replication_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_repair_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/repairReplication'} # type: ignore
def _reprotect_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
reprotect_input, # type: "_models.ReverseReplicationInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._reprotect_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(reprotect_input, 'ReverseReplicationInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reprotect_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/reProtect'} # type: ignore
def begin_reprotect(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
reprotect_input, # type: "_models.ReverseReplicationInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute Reverse Replication\Reprotect.
Operation to reprotect or reverse replicate a failed over replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param reprotect_input: Reverse replication input.
:type reprotect_input: ~azure.mgmt.recoveryservicessiterecovery.models.ReverseReplicationInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
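Illustrative sketch (the ``models`` import and the empty properties object below are
assumptions used only for illustration)::

    from azure.mgmt.recoveryservicessiterecovery import models
    reprotect_input = models.ReverseReplicationInput(
        properties=models.ReverseReplicationInputProperties())
    poller = client.replication_protected_items.begin_reprotect(
        fabric_name, protection_container_name, replicated_protected_item_name, reprotect_input)
    item = poller.result()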
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reprotect_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
reprotect_input=reprotect_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reprotect.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/reProtect'} # type: ignore
def _resolve_health_errors_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
resolve_health_input, # type: "_models.ResolveHealthInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._resolve_health_errors_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(resolve_health_input, 'ResolveHealthInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_resolve_health_errors_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/resolveHealthErrors'} # type: ignore
def begin_resolve_health_errors(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
resolve_health_input, # type: "_models.ResolveHealthInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Resolve health errors.
Operation to resolve health issues of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param resolve_health_input: Health issue input object.
:type resolve_health_input: ~azure.mgmt.recoveryservicessiterecovery.models.ResolveHealthInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._resolve_health_errors_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
resolve_health_input=resolve_health_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resolve_health_errors.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/resolveHealthErrors'} # type: ignore
def _test_failover_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
testfailover_input, # type: "_models.TestFailoverInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._test_failover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(testfailover_input, 'TestFailoverInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_test_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailover'} # type: ignore
def begin_test_failover(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
testfailover_input, # type: "_models.TestFailoverInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute test failover.
Operation to perform a test failover of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param testfailover_input: Test failover input.
:type testfailover_input: ~azure.mgmt.recoveryservicessiterecovery.models.TestFailoverInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._test_failover_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
testfailover_input=testfailover_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_test_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailover'} # type: ignore
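    # Illustrative usage sketch (not part of the generated client). The client
    # class name, credential type and constructor arguments follow the usual
    # azure-mgmt-* conventions and are assumptions, not code from this module:
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.recoveryservicessiterecovery import SiteRecoveryManagementClient
    #
    #     client = SiteRecoveryManagementClient(
    #         credential=DefaultAzureCredential(),
    #         subscription_id="<subscription-id>",
    #         resource_group_name="<resource-group>",
    #         resource_name="<vault-name>",
    #     )
    #     poller = client.replication_protected_items.begin_test_failover(
    #         fabric_name="<fabric>",
    #         protection_container_name="<container>",
    #         replicated_protected_item_name="<protected-item>",
    #         testfailover_input=test_failover_input,  # a _models.TestFailoverInput
    #     )
    #     protected_item = poller.result()  # blocks until the LRO completes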
def _test_failover_cleanup_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
cleanup_input, # type: "_models.TestFailoverCleanupInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._test_failover_cleanup_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cleanup_input, 'TestFailoverCleanupInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_test_failover_cleanup_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailoverCleanup'} # type: ignore
def begin_test_failover_cleanup(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
cleanup_input, # type: "_models.TestFailoverCleanupInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute test failover cleanup.
Operation to clean up the test failover of a replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param cleanup_input: Test failover cleanup input.
:type cleanup_input: ~azure.mgmt.recoveryservicessiterecovery.models.TestFailoverCleanupInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._test_failover_cleanup_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
cleanup_input=cleanup_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_test_failover_cleanup.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailoverCleanup'} # type: ignore
def _unplanned_failover_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
failover_input, # type: "_models.UnplannedFailoverInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._unplanned_failover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(failover_input, 'UnplannedFailoverInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_unplanned_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/unplannedFailover'} # type: ignore
def begin_unplanned_failover(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
failover_input, # type: "_models.UnplannedFailoverInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute unplanned failover.
Operation to initiate a failover of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param failover_input: Failover input.
:type failover_input: ~azure.mgmt.recoveryservicessiterecovery.models.UnplannedFailoverInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._unplanned_failover_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
failover_input=failover_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_unplanned_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/unplannedFailover'} # type: ignore
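    # Illustrative sketch of the continuation-token pattern described in the
    # docstring above. `client` is assumed to be an already constructed
    # operations client; the variable names are placeholders:
    #
    #     poller = client.replication_protected_items.begin_unplanned_failover(
    #         fabric_name, protection_container_name,
    #         replicated_protected_item_name, failover_input)
    #     token = poller.continuation_token()  # persist this string durably
    #     ...
    #     resumed = client.replication_protected_items.begin_unplanned_failover(
    #         fabric_name, protection_container_name,
    #         replicated_protected_item_name, failover_input,
    #         continuation_token=token)        # resume polling from the saved state
    #     protected_item = resumed.result()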
def _update_appliance_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
appliance_update_input, # type: "_models.UpdateApplianceForReplicationProtectedItemInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_appliance_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(appliance_update_input, 'UpdateApplianceForReplicationProtectedItemInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_appliance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/updateAppliance'} # type: ignore
def begin_update_appliance(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
appliance_update_input, # type: "_models.UpdateApplianceForReplicationProtectedItemInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Updates appliance for replication protected Item.
The operation to update appliance of an ASR replication protected item.
:param fabric_name: Fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param appliance_update_input: Appliance update protection input.
:type appliance_update_input: ~azure.mgmt.recoveryservicessiterecovery.models.UpdateApplianceForReplicationProtectedItemInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_appliance_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
appliance_update_input=appliance_update_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_appliance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/updateAppliance'} # type: ignore
def _update_mobility_service_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replication_protected_item_name, # type: str
update_mobility_service_request, # type: "_models.UpdateMobilityServiceRequest"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_mobility_service_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicationProtectedItemName': self._serialize.url("replication_protected_item_name", replication_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_mobility_service_request, 'UpdateMobilityServiceRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_mobility_service_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicationProtectedItemName}/updateMobilityService'} # type: ignore
def begin_update_mobility_service(
self,
fabric_name, # type: str
protection_container_name, # type: str
replication_protected_item_name, # type: str
update_mobility_service_request, # type: "_models.UpdateMobilityServiceRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Update the mobility service on a protected item.
        The operation to update (push update) the installed mobility service software on a replication
        protected item to the latest available version.
:param fabric_name: The name of the fabric containing the protected item.
:type fabric_name: str
:param protection_container_name: The name of the container containing the protected item.
:type protection_container_name: str
:param replication_protected_item_name: The name of the protected item on which the agent is to
be updated.
:type replication_protected_item_name: str
:param update_mobility_service_request: Request to update the mobility service on the protected
item.
:type update_mobility_service_request: ~azure.mgmt.recoveryservicessiterecovery.models.UpdateMobilityServiceRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_mobility_service_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replication_protected_item_name=replication_protected_item_name,
update_mobility_service_request=update_mobility_service_request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicationProtectedItemName': self._serialize.url("replication_protected_item_name", replication_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_mobility_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicationProtectedItemName}/updateMobilityService'} # type: ignore
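    # Illustrative sketch of the polling options documented above (the names
    # are placeholders; this is not code from the generated module):
    #
    #     # Default ARM polling, but wait 60 seconds between polls whenever the
    #     # service does not return a Retry-After header:
    #     poller = client.replication_protected_items.begin_update_mobility_service(
    #         fabric_name, protection_container_name,
    #         replication_protected_item_name, update_mobility_service_request,
    #         polling_interval=60)
    #
    #     # polling=False skips polling entirely; .result() then returns the
    #     # response of the initial request without waiting for completion.
    #     no_poll = client.replication_protected_items.begin_update_mobility_service(
    #         fabric_name, protection_container_name,
    #         replication_protected_item_name, update_mobility_service_request,
    #         polling=False)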
def list(
self,
skip_token=None, # type: Optional[str]
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ReplicationProtectedItemCollection"]
"""Gets the list of replication protected items.
Gets the list of ASR replication protected items in the vault.
:param skip_token: The pagination token. Possible values: "FabricId" or "FabricId_CloudId" or
null.
:type skip_token: str
:param filter: OData filter options.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReplicationProtectedItemCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItemCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItemCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItemCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationProtectedItems'} # type: ignore
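    # Illustrative sketch of consuming the pager returned by list(). It assumes
    # an already constructed client; the OData filter string is only a
    # placeholder, not a filter prescribed by this module:
    #
    #     for item in client.replication_protected_items.list(
    #             filter="replicationProvider eq 'A2A'"):
    #         print(item.name)
    #
    #     # The returned ItemPaged follows nextLink URLs transparently, so no
    #     # manual handling of skip_token is needed for a plain full listing.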
| 57.163287
| 369
| 0.693099
|
79533aa1fe1fad4323ee4ae1ae27e432ee084c79
| 959
|
py
|
Python
|
scripts/WindowsVersion/AWC.py
|
Lyniat/AutomatedWallpaperChanger
|
76093f4f9bd20d8fdfd497f6dfbe93d22b17feac
|
[
"MIT"
] | null | null | null |
scripts/WindowsVersion/AWC.py
|
Lyniat/AutomatedWallpaperChanger
|
76093f4f9bd20d8fdfd497f6dfbe93d22b17feac
|
[
"MIT"
] | null | null | null |
scripts/WindowsVersion/AWC.py
|
Lyniat/AutomatedWallpaperChanger
|
76093f4f9bd20d8fdfd497f6dfbe93d22b17feac
|
[
"MIT"
] | 1
|
2021-07-19T17:32:04.000Z
|
2021-07-19T17:32:04.000Z
|
import os
import PySimpleGUI as sg
from gui import AWCGUI
from runtime import AWC
__author__ = "Marten Scheuck"
"""This runs the install process."""
#TODO: Make next and previous desktop wallpaper available
#TODO: Log when the desktop wallpaper is changed
def main():
    """Run the install GUI on first start, then launch the wallpaper changer."""
    try:
        if not os.path.isfile("config.cfg"):
            # First run: no config yet, so collect settings via the GUI first.
            AWCGUI().run()
            AWC()
        else:
            # Config already exists: start the wallpaper changer directly.
            AWC()
    # Logs all errors
    except Exception as e:
        sg.PopupError("An Error has occurred! Program shutting down!")
        # Append mode creates error.log if it does not exist yet.
        with open("error.log", "a") as f:
            f.write("AWC.exe - ERROR: " + str(e) + '\n')
if __name__ == "__main__":
main()
| 23.390244
| 70
| 0.578728
|
79533b2feead96de85c7ce775025ba2298560df2
| 609
|
py
|
Python
|
products/migrations/0008_auto_20210414_1630.py
|
KennyDaktyl/pieczatki-colop.pl
|
25719c57a4cf42face23a034852c712e0ae7c20b
|
[
"MIT"
] | null | null | null |
products/migrations/0008_auto_20210414_1630.py
|
KennyDaktyl/pieczatki-colop.pl
|
25719c57a4cf42face23a034852c712e0ae7c20b
|
[
"MIT"
] | null | null | null |
products/migrations/0008_auto_20210414_1630.py
|
KennyDaktyl/pieczatki-colop.pl
|
25719c57a4cf42face23a034852c712e0ae7c20b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-14 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0007_auto_20210412_1214'),
]
operations = [
migrations.AlterField(
model_name='colors',
name='class_text',
field=models.CharField(max_length=32, verbose_name='Text dla koloru klasy'),
),
migrations.AlterField(
model_name='colors',
name='name',
field=models.CharField(max_length=32, verbose_name='Nazwa koloru'),
),
]
| 25.375
| 88
| 0.599343
|
79533bf9f284ef1d7f325b4ec49d033459ac2152
| 786
|
py
|
Python
|
leetcode/single_number_II_leetcode_137/single_number_II.py
|
Williano/Interview-Prep
|
0ad688637215080c7e4d26c640d74c89227e7cfb
|
[
"MIT"
] | null | null | null |
leetcode/single_number_II_leetcode_137/single_number_II.py
|
Williano/Interview-Prep
|
0ad688637215080c7e4d26c640d74c89227e7cfb
|
[
"MIT"
] | null | null | null |
leetcode/single_number_II_leetcode_137/single_number_II.py
|
Williano/Interview-Prep
|
0ad688637215080c7e4d26c640d74c89227e7cfb
|
[
"MIT"
] | null | null | null |
"""
Leetcode No: 137
Title: Single Number II
Description:
Given an integer array nums where every element appears three times except
for one, which appears exactly once. Find the single element and return it.
You must implement a solution with a linear runtime complexity and use only
constant extra space.
Example 1:
Input: nums = [2,2,3,2]
Output: 3
Example 2:
Input: nums = [0,1,0,1,0,1,99]
Output: 99
"""
from typing import List
class Solution:
    def single_number(self, nums: List[int]) -> int:
        # Count-based approach: return the first value that appears fewer than
        # two times.  Since list.count() is itself O(n), this runs in O(n^2)
        # time and does not meet the linear-time / constant-space constraint
        # stated above; a constant-space alternative is sketched below.
        if len(nums) == 0:
            return nums
        if len(nums) < 2:
            return nums[0]
        for num in nums:
            num_count = nums.count(num)
            if num_count < 2:
                return num
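# Added sketch (not in the original file): a linear-time, constant-space
# alternative that actually meets the constraints stated in the docstring.
# `ones` holds bits seen once (mod 3) and `twos` bits seen twice; a bit seen a
# third time is cleared from both, so only the number appearing once survives.
class SolutionBitwise:
    def single_number(self, nums: List[int]) -> int:
        ones, twos = 0, 0
        for num in nums:
            ones = (ones ^ num) & ~twos  # keep the bit only if not already counted twice
            twos = (twos ^ num) & ~ones  # promote to "seen twice" unless still in ones
        return ones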
| 21.243243
| 78
| 0.597964
|
79533cee6eb2b8f6cb163335e97b46187b7b9a8b
| 8,359
|
py
|
Python
|
pymic/net/net2d/cople_net.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | 147
|
2019-12-23T02:52:04.000Z
|
2022-03-06T16:30:43.000Z
|
pymic/net/net2d/cople_net.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | 4
|
2020-12-18T12:47:21.000Z
|
2021-05-21T02:18:01.000Z
|
pymic/net/net2d/cople_net.py
|
vincentme/PyMIC
|
5cbbca7d0a19232be647086d4686ceea523f45ee
|
[
"Apache-2.0"
] | 32
|
2020-01-08T13:48:50.000Z
|
2022-03-12T06:31:13.000Z
|
# -*- coding: utf-8 -*-
"""
Author: Guotai Wang
Date: 12 June, 2020
Implementation of COPLENet for COVID-19 pneumonia lesion segmentation from CT images.
Reference:
G. Wang et al. A Noise-robust Framework for Automatic Segmentation of COVID-19 Pneumonia Lesions
from CT Images. IEEE Transactions on Medical Imaging, 39(8),2020:2653-2663. DOI:10.1109/TMI.2020.3000314.
"""
from __future__ import print_function, division
import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size = 1):
super(ConvLayer, self).__init__()
padding = int((kernel_size - 1) / 2)
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU()
)
def forward(self, x):
return self.conv(x)
class SEBlock(nn.Module):
def __init__(self, in_channels, r):
super(SEBlock, self).__init__()
redu_chns = int(in_channels / r)
self.se_layers = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, redu_chns, kernel_size=1, padding=0),
nn.LeakyReLU(),
nn.Conv2d(redu_chns, in_channels, kernel_size=1, padding=0),
nn.ReLU())
def forward(self, x):
f = self.se_layers(x)
return f*x + x
class ASPPBlock(nn.Module):
def __init__(self,in_channels, out_channels_list, kernel_size_list, dilation_list):
super(ASPPBlock, self).__init__()
self.conv_num = len(out_channels_list)
assert(self.conv_num == 4)
assert(self.conv_num == len(kernel_size_list) and self.conv_num == len(dilation_list))
pad0 = int((kernel_size_list[0] - 1) / 2 * dilation_list[0])
pad1 = int((kernel_size_list[1] - 1) / 2 * dilation_list[1])
pad2 = int((kernel_size_list[2] - 1) / 2 * dilation_list[2])
pad3 = int((kernel_size_list[3] - 1) / 2 * dilation_list[3])
self.conv_1 = nn.Conv2d(in_channels, out_channels_list[0], kernel_size = kernel_size_list[0],
dilation = dilation_list[0], padding = pad0 )
self.conv_2 = nn.Conv2d(in_channels, out_channels_list[1], kernel_size = kernel_size_list[1],
dilation = dilation_list[1], padding = pad1 )
self.conv_3 = nn.Conv2d(in_channels, out_channels_list[2], kernel_size = kernel_size_list[2],
dilation = dilation_list[2], padding = pad2 )
self.conv_4 = nn.Conv2d(in_channels, out_channels_list[3], kernel_size = kernel_size_list[3],
dilation = dilation_list[3], padding = pad3 )
out_channels = out_channels_list[0] + out_channels_list[1] + out_channels_list[2] + out_channels_list[3]
self.conv_1x1 = nn.Sequential(
nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU())
def forward(self, x):
x1 = self.conv_1(x)
x2 = self.conv_2(x)
x3 = self.conv_3(x)
x4 = self.conv_4(x)
y = torch.cat([x1, x2, x3, x4], dim=1)
y = self.conv_1x1(y)
return y
class ConvBNActBlock(nn.Module):
"""Two convolution layers with batch norm, leaky relu, dropout and SE block"""
def __init__(self,in_channels, out_channels, dropout_p):
super(ConvBNActBlock, self).__init__()
self.conv_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(),
nn.Dropout(dropout_p),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(),
SEBlock(out_channels, 2)
)
def forward(self, x):
return self.conv_conv(x)
class DownBlock(nn.Module):
"""Downsampling by a concantenation of max-pool and avg-pool, followed by ConvBNActBlock
"""
def __init__(self, in_channels, out_channels, dropout_p):
super(DownBlock, self).__init__()
self.maxpool = nn.MaxPool2d(2)
self.avgpool = nn.AvgPool2d(2)
self.conv = ConvBNActBlock(2 * in_channels, out_channels, dropout_p)
def forward(self, x):
x_max = self.maxpool(x)
x_avg = self.avgpool(x)
x_cat = torch.cat([x_max, x_avg], dim=1)
y = self.conv(x_cat)
return y + x_cat
class UpBlock(nn.Module):
"""Upssampling followed by ConvBNActBlock"""
def __init__(self, in_channels1, in_channels2, out_channels,
bilinear=True, dropout_p = 0.5):
super(UpBlock, self).__init__()
self.bilinear = bilinear
if bilinear:
self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size = 1)
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_channels1, in_channels2, kernel_size=2, stride=2)
self.conv = ConvBNActBlock(in_channels2 * 2, out_channels, dropout_p)
def forward(self, x1, x2):
if self.bilinear:
x1 = self.conv1x1(x1)
x1 = self.up(x1)
x_cat = torch.cat([x2, x1], dim=1)
y = self.conv(x_cat)
return y + x_cat
class COPLENet(nn.Module):
def __init__(self, params):
super(COPLENet, self).__init__()
self.params = params
self.in_chns = self.params['in_chns']
self.ft_chns = self.params['feature_chns']
self.n_class = self.params['class_num']
self.bilinear = self.params['bilinear']
self.dropout = self.params['dropout']
assert(len(self.ft_chns) == 5)
f0_half = int(self.ft_chns[0] / 2)
f1_half = int(self.ft_chns[1] / 2)
f2_half = int(self.ft_chns[2] / 2)
f3_half = int(self.ft_chns[3] / 2)
self.in_conv= ConvBNActBlock(self.in_chns, self.ft_chns[0], self.dropout[0])
self.down1 = DownBlock(self.ft_chns[0], self.ft_chns[1], self.dropout[1])
self.down2 = DownBlock(self.ft_chns[1], self.ft_chns[2], self.dropout[2])
self.down3 = DownBlock(self.ft_chns[2], self.ft_chns[3], self.dropout[3])
self.down4 = DownBlock(self.ft_chns[3], self.ft_chns[4], self.dropout[4])
self.bridge0= ConvLayer(self.ft_chns[0], f0_half)
self.bridge1= ConvLayer(self.ft_chns[1], f1_half)
self.bridge2= ConvLayer(self.ft_chns[2], f2_half)
self.bridge3= ConvLayer(self.ft_chns[3], f3_half)
self.up1 = UpBlock(self.ft_chns[4], f3_half, self.ft_chns[3], dropout_p = self.dropout[3])
self.up2 = UpBlock(self.ft_chns[3], f2_half, self.ft_chns[2], dropout_p = self.dropout[2])
self.up3 = UpBlock(self.ft_chns[2], f1_half, self.ft_chns[1], dropout_p = self.dropout[1])
self.up4 = UpBlock(self.ft_chns[1], f0_half, self.ft_chns[0], dropout_p = self.dropout[0])
f4 = self.ft_chns[4]
aspp_chns = [int(f4 / 4), int(f4 / 4), int(f4 / 4), int(f4 / 4)]
aspp_knls = [1, 3, 3, 3]
aspp_dila = [1, 2, 4, 6]
self.aspp = ASPPBlock(f4, aspp_chns, aspp_knls, aspp_dila)
self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class,
kernel_size = 3, padding = 1)
def forward(self, x):
x_shape = list(x.shape)
if(len(x_shape) == 5):
[N, C, D, H, W] = x_shape
new_shape = [N*D, C, H, W]
x = torch.transpose(x, 1, 2)
x = torch.reshape(x, new_shape)
x0 = self.in_conv(x)
x0b = self.bridge0(x0)
x1 = self.down1(x0)
x1b = self.bridge1(x1)
x2 = self.down2(x1)
x2b = self.bridge2(x2)
x3 = self.down3(x2)
x3b = self.bridge3(x3)
x4 = self.down4(x3)
x4 = self.aspp(x4)
x = self.up1(x4, x3b)
x = self.up2(x, x2b)
x = self.up3(x, x1b)
x = self.up4(x, x0b)
output = self.out_conv(x)
if(len(x_shape) == 5):
new_shape = [N, D] + list(output.shape)[1:]
output = torch.reshape(output, new_shape)
output = torch.transpose(output, 1, 2)
return output
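# Hedged usage sketch (not part of the original file): the dictionary keys
# mirror the parameters read in COPLENet.__init__; the concrete channel,
# dropout and class numbers are illustrative assumptions only.
if __name__ == "__main__":
    demo_params = {'in_chns': 1,
                   'feature_chns': [16, 32, 64, 128, 256],
                   'dropout': [0.0, 0.0, 0.3, 0.4, 0.5],
                   'class_num': 2,
                   'bilinear': True}
    net = COPLENet(demo_params)
    x = torch.rand(2, 1, 256, 256)  # a batch of 2 single-channel 2D slices
    y = net(x)
    print(y.shape)  # expected: torch.Size([2, 2, 256, 256])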
| 41.17734
| 113
| 0.6039
|
79533d0eb65e889ec8aa60424f545aaed81c95da
| 525
|
py
|
Python
|
pys60/bbdata/connectionstate.py
|
crutchwalkfactory/jaikuengine-mobile-client
|
c47100ec009d47a4045b3d98addc9b8ad887b132
|
[
"MIT"
] | null | null | null |
pys60/bbdata/connectionstate.py
|
crutchwalkfactory/jaikuengine-mobile-client
|
c47100ec009d47a4045b3d98addc9b8ad887b132
|
[
"MIT"
] | null | null | null |
pys60/bbdata/connectionstate.py
|
crutchwalkfactory/jaikuengine-mobile-client
|
c47100ec009d47a4045b3d98addc9b8ad887b132
|
[
"MIT"
] | null | null | null |
from bbdata.base import *
from bbdata.uid import *
CONNECTIONSTATE_TYPE = [ CONTEXT_UID_SENSORDATAFACTORY, 64, 1, 0 ]
class ConnectionState(BBCompound):
def __init__(self, name='connectionstate'):
super(BBCompound, self).__init__(name)
self.name = ShortString('name')
self.state = Int('state')
self.message = LongString('message')
self.error = ErrorInfo('error')
self.retry = Time('retry')
#@classmethod
def type(self):
return CONNECTIONSTATE_TYPE
type=classmethod(type)
ConnectionState.add_to_factory()
| 26.25
| 66
| 0.746667
|
79533e3f5bf8fb9f6713cd3c82fd4fefe97b4c8b
| 68
|
py
|
Python
|
2020/examples-in-class-2020-09-24/for_loop_example2.py
|
ati-ozgur/course-python
|
38237d120043c07230658b56dc3aeb01c3364933
|
[
"Apache-2.0"
] | 1
|
2021-02-04T16:59:11.000Z
|
2021-02-04T16:59:11.000Z
|
2020/examples-in-class-2020-09-24/for_loop_example2.py
|
ati-ozgur/course-python
|
38237d120043c07230658b56dc3aeb01c3364933
|
[
"Apache-2.0"
] | null | null | null |
2020/examples-in-class-2020-09-24/for_loop_example2.py
|
ati-ozgur/course-python
|
38237d120043c07230658b56dc3aeb01c3364933
|
[
"Apache-2.0"
] | 1
|
2019-10-30T14:37:48.000Z
|
2019-10-30T14:37:48.000Z
|
start = 1
stop = 10
for index in range(start,stop):
print(index)
| 17
| 31
| 0.676471
|
79533e4e47c9037c119100d9e267ebe23e9fa81b
| 616
|
py
|
Python
|
blog/mysite/blog/migrations/0004_post_tags.py
|
EssaAlshammri/django-by-example
|
d1a1cba9308d4f19bbb1228dbd191ad5540b2c78
|
[
"MIT"
] | 3
|
2017-04-25T10:19:02.000Z
|
2017-06-07T12:50:30.000Z
|
blog/mysite/blog/migrations/0004_post_tags.py
|
EssaAlshammri/django-by-example
|
d1a1cba9308d4f19bbb1228dbd191ad5540b2c78
|
[
"MIT"
] | null | null | null |
blog/mysite/blog/migrations/0004_post_tags.py
|
EssaAlshammri/django-by-example
|
d1a1cba9308d4f19bbb1228dbd191ad5540b2c78
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-20 18:38
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('blog', '0003_auto_20170320_1247'),
]
operations = [
migrations.AddField(
model_name='post',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
| 26.782609
| 162
| 0.646104
|
795340c9912642948b0953c9b7ef0df8f0277957
| 2,136
|
py
|
Python
|
project/api/views.py
|
pjconnolly12/Flasktaskr
|
70f54e92c6cc59b4b392379d2ea9c5d8a73d2dee
|
[
"MIT"
] | null | null | null |
project/api/views.py
|
pjconnolly12/Flasktaskr
|
70f54e92c6cc59b4b392379d2ea9c5d8a73d2dee
|
[
"MIT"
] | null | null | null |
project/api/views.py
|
pjconnolly12/Flasktaskr
|
70f54e92c6cc59b4b392379d2ea9c5d8a73d2dee
|
[
"MIT"
] | null | null | null |
# project/api/views.py
from functools import wraps
from flask import flash, redirect, jsonify, \
session, url_for, Blueprint, make_response
from project import db
from project.models import Task
################
#### config ####
################
api_blueprint = Blueprint('api', __name__)
##########################
#### helper functions ####
##########################
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('users.login'))
return wrap
def open_tasks():
return db.session.query(Task).filter_by(
status='1').order_by(Task.due_date.asc())
def closed_tasks():
return db.session.query(Task).filter_by(
status='0').order_by(Task.due_date.asc())
################
#### routes ####
################
@api_blueprint.route('/api/v1/tasks/')
def api_tasks():
results = db.session.query(Task).limit(10).offset(0).all()
json_results = []
for result in results:
data = {
'task_id': result.task_id,
'task name': result.name,
'due date': str(result.due_date),
'priority': result.priority,
'posted date': str(result.posted_date),
'status': result.status,
'user id': result.user_id
}
json_results.append(data)
return jsonify(items=json_results)
@api_blueprint.route('/api/v1/tasks/<int:task_id>')
def task(task_id):
result = db.session.query(Task).filter_by(task_id=task_id).first()
if result:
result = {
'task_id': result.task_id,
'task name': result.name,
'due date': str(result.due_date),
'priority': result.priority,
'posted date': str(result.posted_date),
'status': result.status,
'user id': result.user_id
}
code = 200
else:
result = {"error": "Element does not exist"}
code = 404
return make_response(jsonify(result), code)
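# --- Added usage sketch (illustrative, not part of the original module) -----
# Shows the two routes defined above from a client's point of view; it assumes
# the Flask app registering this blueprint is already running on a hypothetical
# local address, and uses the third-party `requests` library.
if __name__ == '__main__':
    import requests
    base_url = 'http://localhost:5000'                             # placeholder host
    print(requests.get(base_url + '/api/v1/tasks/').json())        # first 10 tasks
    print(requests.get(base_url + '/api/v1/tasks/1').status_code)  # 200, or 404 if missing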
| 26.04878
| 70
| 0.561798
|
7953412485dde982fa8d741724f7d3e410f9ce70
| 9,704
|
py
|
Python
|
models/crabnet.py
|
kaaiian/KingCrabNet
|
05ffbcc48cd692223c475ebd8ca758e01ded6521
|
[
"MIT"
] | 1
|
2020-03-04T06:21:36.000Z
|
2020-03-04T06:21:36.000Z
|
models/crabnet.py
|
kaaiian/KingCrabNet
|
05ffbcc48cd692223c475ebd8ca758e01ded6521
|
[
"MIT"
] | null | null | null |
models/crabnet.py
|
kaaiian/KingCrabNet
|
05ffbcc48cd692223c475ebd8ca758e01ded6521
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from torch import nn
# %%
class AttentionBlock(nn.Module):
"""
This implements the multi-headed attention block
of the CrabNet architecture.
Parameters
----------
d_model: int
the number of expected features in the input (required, default=32).
nhead: int
the number of heads in the multiheadattention models (required,
default=2).
dim_feedforward: int
the dimension of the feedforward network model (required, default=16).
dropout: float
the dropout value (default=0.1).
edm: bool
specifies whether the input X matrix is of type EDM
or not (optional, default=False).
"""
def __init__(self,
compute_device,
d_model=32,
nhead=2,
dim_feedforward=16,
dropout=0.1,
edm=False):
super(AttentionBlock, self).__init__()
self.compute_device = compute_device
self.d_model = d_model
self.dim_feedforward = dim_feedforward
self.nhead = nhead
self.edm = edm
self.dropout = dropout
self.softmax = nn.Softmax(dim=-1)
self.layernorm0 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.layernorm1a = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.layernorm1b = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.layernorm2a = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.layernorm2b = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.dropout1 = nn.Dropout(p=self.dropout)
# self.fc_q = nn.Linear(self.d_model, self.dim_feedforward, bias=False)
# self.fc_k = nn.Linear(self.d_model, self.dim_feedforward, bias=False)
# self.fc_v = nn.Linear(self.d_model, self.dim_feedforward, bias=False)
self.fc_q_list = nn.ModuleList(
[nn.Linear(self.d_model, self.dim_feedforward, bias=True)
for _ in range(self.nhead)]
)
self.fc_k_list = nn.ModuleList(
[nn.Linear(self.d_model, self.dim_feedforward, bias=True)
for _ in range(self.nhead)]
)
self.fc_v_list = nn.ModuleList(
[nn.Linear(self.d_model, self.dim_feedforward, bias=True)
for _ in range(self.nhead)]
)
self.fc_o = nn.Linear(self.nhead * self.dim_feedforward,
self.d_model,
bias=True)
self.fc1 = nn.Linear(self.d_model, self.d_model)
self.fc2 = nn.Linear(self.d_model, self.d_model)
self.leaky = nn.LeakyReLU()
def forward(self, x):
"""
Forward pass of the attention block.
Parameters
----------
x: torch.Tensor
A representation of the chemical compounds in the shape
(batch, n_compounds, n_elements, n_feats) in the case of EDM data,
(batch, n_compounds, n_feats) in the case of non-EDM data.
Returns
-------
        r: torch.Tensor of the same shape as the input
The result of the forward pass through the attention block.
"""
sqrt = np.sqrt(self.dim_feedforward)
sqrt = torch.as_tensor(sqrt).to(self.compute_device)
x = self.layernorm0(x)
# Self-attention
z_list = self.nhead * [None]
for i in range(self.nhead):
q = self.fc_q_list[i](x)
k = self.fc_k_list[i](x)
v = self.fc_v_list[i](x)
# q = self.leaky(self.fc_q_list[i](x))
# k = self.leaky(self.fc_k_list[i](x))
# v = self.leaky(self.fc_v_list[i](x))
# q = self.fc_q(x)
# k = self.fc_k(x)
# v = self.fc_v(x)
# q = self.leaky(self.fc_q(x))
# k = self.leaky(self.fc_k(x))
# v = self.leaky(self.fc_v(x))
k_t = torch.transpose(k, dim0=-2, dim1=-1)
qk_t = torch.matmul(q, k_t)
soft = self.softmax(qk_t / sqrt)
# eye = torch.eye(4).to(self.compute_device)
# soft = soft + eye
# soft.register_hook(lambda x: print(x[0]))
soft = self.dropout1(soft)
# if i == 0:
# print(soft[0, :, :])
z = torch.matmul(soft, v)
z_list[i] = z
z = torch.cat(z_list, dim=-1)
z = self.fc_o(z)
# Feed-forward
r0 = x + z
r0 = self.layernorm1a(r0)
r0 = self.fc1(r0)
r0 = self.layernorm1b(r0)
r1 = r0 + x
r1 = self.layernorm2a(r1)
r = self.fc2(r1)
r = self.layernorm2b(r)
return r
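# --- Added shape check (illustrative, not part of the original file) --------
# With the default sizes (d_model=32, nhead=2, dim_feedforward=16) the block
# maps a (batch, n_elements, d_model) tensor to a tensor of the same shape.
if __name__ == "__main__":
    _block = AttentionBlock(compute_device=torch.device("cpu"))
    _x = torch.randn(8, 4, 32)            # (batch, n_elements, d_model)
    print(_block(_x).shape)               # torch.Size([8, 4, 32])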
# %%
class CrabNet(nn.Module):
"""
This implements the overall CrabNet architecture.
Parameters
----------
input_dims: int
the number of expected features in the input (required).
d_model: int
the number of element embedding dimensions (optional, default=64).
nhead: int
the number of heads in the multi-headed attention mechanism (optional,
default=4).
num_layers: int
the number of sub-encoder-layers in the encoder (optional, default=2).
dim_feedforward: int
the dimension of the feedforward network model (optional, default=16).
dropout: float
the dropout value (optional, default=0.1).
edm: bool
specifies whether the input X matrix is of type EDM
or not (optional, default=False).
"""
def __init__(self,
compute_device,
input_dims,
d_model=201,
nhead=4,
num_layers=2,
dim_feedforward=16,
dropout=0.1,
edm=False):
super(CrabNet, self).__init__()
self.compute_device = compute_device
self.input_dims = input_dims
self.d_model = d_model
self.nhead = nhead
self.num_layers = num_layers
self.dim_feedforward = dim_feedforward
self.dropout = dropout
self.edm = edm
self.output_dims = 1
self.dropout1 = nn.Dropout(p=self.dropout)
self.prelu1 = nn.PReLU(num_parameters=8)
self.fcmask = nn.Linear(self.input_dims, 1, bias=False)
self.fc1 = nn.Linear(self.input_dims, self.d_model)
self.fc2 = nn.Linear(self.d_model, self.d_model)
# self.fc3 = nn.Linear(self.d_model, self.output_dims)
self.attentionblocks = nn.ModuleList(
[AttentionBlock(compute_device=self.compute_device,
d_model=self.d_model,
nhead=self.nhead,
dim_feedforward=self.dim_feedforward,
dropout=self.dropout,
edm=self.edm)
for _ in range(self.num_layers)]
)
# define an output neural network
out_hidden = [1024, 512, 256, 256, 128]
self.output_nn = ResidualNetwork(self.d_model, 1, out_hidden)
self.leaky = nn.LeakyReLU()
def forward(self, x):
"""
Forward pass of the CrabNet model.
Parameters
----------
x: torch.Tensor
A representation of the chemical compounds in the shape
(n_compounds, n_elements, n_feats) in the case of EDM data,
(n_compounds, n_feats) in the case of non-EDM data.
Returns
-------
y: torch.Tensor
The element property prediction with the shape 1.
"""
x0 = self.fcmask(x)
x0 = self.leaky(x0)
# x = self.dropout1(x)
# print(x[0, :, :])
for i, block in enumerate(self.attentionblocks):
x = block(x)
# if i == 0 and self.edm:
# x = self.prelu1(x)
x = self.output_nn(x)
# print(x.shape)
# print(x0.shape)
# x = self.fc2(x)
# x = self.fc3(x)
# print(x[0, :, :])
x = x * x0
# print(x[0, :, :])
if self.edm:
x = torch.sum(x, dim=-2)
y = x
return y
class ResidualNetwork(nn.Module):
"""
Feed forward Residual Neural Network
"""
def __init__(self, input_dim, output_dim, hidden_layer_dims):
"""
Inputs
----------
input_dim: int
output_dim: int
hidden_layer_dims: list(int)
"""
super(ResidualNetwork, self).__init__()
dims = [input_dim]+hidden_layer_dims
self.fcs = nn.ModuleList([nn.Linear(dims[i], dims[i+1])
for i in range(len(dims)-1)])
# self.bns = nn.ModuleList([nn.BatchNorm1d(dims[i+1])
# for i in range(len(dims)-1)])
self.res_fcs = nn.ModuleList([nn.Linear(dims[i], dims[i+1], bias=False)
if (dims[i] != dims[i+1])
else nn.Identity()
for i in range(len(dims)-1)])
self.acts = nn.ModuleList([nn.ReLU() for _ in range(len(dims)-1)])
self.fc_out = nn.Linear(dims[-1], output_dim)
def forward(self, fea):
# for fc, bn, res_fc, act in zip(self.fcs, self.bns,
# self.res_fcs, self.acts):
# fea = act(bn(fc(fea)))+res_fc(fea)
for fc, res_fc, act in zip(self.fcs, self.res_fcs, self.acts):
fea = act(fc(fea))+res_fc(fea)
return self.fc_out(fea)
def __repr__(self):
return '{}'.format(self.__class__.__name__)
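# --- Added end-to-end sketch (illustrative, not part of the original file) --
# Because forward() feeds x directly into the attention blocks, input_dims has
# to equal d_model; with edm=False the model maps each (n_compounds, n_feats)
# row to one prediction. The sizes below are illustrative only.
if __name__ == "__main__":
    _device = torch.device("cpu")
    _model = CrabNet(compute_device=_device, input_dims=201, d_model=201)
    _feats = torch.randn(16, 201)         # (n_compounds, n_feats)
    print(_model(_feats).shape)           # torch.Size([16, 1])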
| 33.232877
| 79
| 0.540293
|
7953412be5f3e29ec43af40f6a7a831c8e66269a
| 3,994
|
py
|
Python
|
astroquery/xmatch/tests/test_xmatch.py
|
astrocatalogs/astroquery
|
9919a32cb027febcd73cd743efaae6754061a534
|
[
"BSD-3-Clause"
] | 1
|
2020-04-18T23:47:09.000Z
|
2020-04-18T23:47:09.000Z
|
astroquery/xmatch/tests/test_xmatch.py
|
astrocatalogs/astroquery
|
9919a32cb027febcd73cd743efaae6754061a534
|
[
"BSD-3-Clause"
] | null | null | null |
astroquery/xmatch/tests/test_xmatch.py
|
astrocatalogs/astroquery
|
9919a32cb027febcd73cd743efaae6754061a534
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os.path
import requests
from astropy.tests.helper import pytest
from astropy.io import ascii
from astropy.table import Table
from astropy.units import arcsec
from ...utils import commons
from ...utils.testing_tools import MockResponse
from ...xmatch import XMatch
DATA_FILES = {
'get': 'tables.csv', # .action.getVizieRTableNames
'post': 'query_res.csv', # .request.xmatch
}
class MockResponseXmatch(MockResponse):
def __init__(self, method, url, data, **kwargs):
super(MockResponseXmatch, self).__init__(**kwargs)
self.data = data
fn = data_path(DATA_FILES[method.lower()])
with open(fn, 'rb') as f:
self.content = f.read()
def get_content(self):
return self.content
@pytest.fixture
def patch_request(request):
    mp = request.getfixturevalue("monkeypatch")
mp.setattr(requests, "_request", request_mockreturn)
return mp
def request_mockreturn(method, url, data, **kwargs):
return MockResponseXmatch(method, url, data)
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_xmatch_query_invalid_max_distance():
with pytest.raises(ValueError) as ex:
XMatch().query_async('', '', 181 * arcsec)
assert str(ex.value) == (
'max_distance argument must not be greater than 180')
def test_get_available_tables(monkeypatch):
xm = XMatch()
monkeypatch.setattr(xm, '_request', request_mockreturn)
tables = xm.get_available_tables()
assert tables
assert 'II/311/wise' in tables
assert 'II/246/out' in tables
def test_xmatch_is_avail_table(monkeypatch):
xm = XMatch()
monkeypatch.setattr(xm, '_request', request_mockreturn)
assert xm.is_table_available('II/311/wise')
assert xm.is_table_available('II/246/out')
assert xm.is_table_available('vizier:II/311/wise')
assert not xm.is_table_available('blablabla')
def test_xmatch_query_local(monkeypatch):
xm = XMatch()
monkeypatch.setattr(xm, '_request', request_mockreturn)
monkeypatch.setattr(
commons,
'send_request',
lambda url, data, timeout, request_type='POST', headers={}, **kwargs:
request_mockreturn(request_type, url, data, **kwargs))
with open(data_path('posList.csv')) as pos_list:
response = xm.query_async(
cat1=pos_list, cat2='vizier:II/246/out', max_distance=5 * arcsec,
colRA1='ra', colDec1='dec')
table = ascii.read(response.text, format='csv')
assert isinstance(table, Table)
assert table.colnames == [
'angDist', 'ra', 'dec', 'my_id', '2MASS', 'RAJ2000', 'DEJ2000',
'errHalfMaj', 'errHalfMin', 'errPosAng', 'Jmag', 'Hmag', 'Kmag',
'e_Jmag', 'e_Hmag', 'e_Kmag', 'Qfl', 'Rfl', 'X', 'MeasureJD']
assert len(table) == 11
def test_xmatch_query_cat1_table_local(monkeypatch):
xm = XMatch()
monkeypatch.setattr(xm, '_request', request_mockreturn)
monkeypatch.setattr(
commons,
'send_request',
lambda url, data, timeout, request_type='POST', headers={}, **kwargs:
request_mockreturn(request_type, url, data, **kwargs))
with open(data_path('posList.csv')) as pos_list:
input_table = Table.read(pos_list.readlines(),
format='ascii.csv',
guess=False)
response = xm.query_async(
cat1=input_table, cat2='vizier:II/246/out', max_distance=5 * arcsec,
colRA1='ra', colDec1='dec')
table = ascii.read(response.text, format='csv')
assert isinstance(table, Table)
assert table.colnames == [
'angDist', 'ra', 'dec', 'my_id', '2MASS', 'RAJ2000', 'DEJ2000',
'errHalfMaj', 'errHalfMin', 'errPosAng', 'Jmag', 'Hmag', 'Kmag',
'e_Jmag', 'e_Hmag', 'e_Kmag', 'Qfl', 'Rfl', 'X', 'MeasureJD']
assert len(table) == 11
| 33.847458
| 77
| 0.655233
|
7953414ad2e63a0b829aa71b9c2de91044d1b254
| 6,601
|
py
|
Python
|
sdk/python/pulumi_azure_native/peering/v20200401/get_peering.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/peering/v20200401/get_peering.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/peering/v20200401/get_peering.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPeeringResult',
'AwaitableGetPeeringResult',
'get_peering',
]
@pulumi.output_type
class GetPeeringResult:
"""
Peering is a logical representation of a set of connections to the Microsoft Cloud Edge at a location.
"""
def __init__(__self__, direct=None, exchange=None, id=None, kind=None, location=None, name=None, peering_location=None, provisioning_state=None, sku=None, tags=None, type=None):
if direct and not isinstance(direct, dict):
raise TypeError("Expected argument 'direct' to be a dict")
pulumi.set(__self__, "direct", direct)
if exchange and not isinstance(exchange, dict):
raise TypeError("Expected argument 'exchange' to be a dict")
pulumi.set(__self__, "exchange", exchange)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peering_location and not isinstance(peering_location, str):
raise TypeError("Expected argument 'peering_location' to be a str")
pulumi.set(__self__, "peering_location", peering_location)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def direct(self) -> Optional['outputs.PeeringPropertiesDirectResponse']:
"""
The properties that define a direct peering.
"""
return pulumi.get(self, "direct")
@property
@pulumi.getter
def exchange(self) -> Optional['outputs.PeeringPropertiesExchangeResponse']:
"""
The properties that define an exchange peering.
"""
return pulumi.get(self, "exchange")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
The kind of the peering.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringLocation")
def peering_location(self) -> Optional[str]:
"""
The location of the peering.
"""
return pulumi.get(self, "peering_location")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> 'outputs.PeeringSkuResponse':
"""
The SKU that defines the tier and kind of the peering.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetPeeringResult(GetPeeringResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPeeringResult(
direct=self.direct,
exchange=self.exchange,
id=self.id,
kind=self.kind,
location=self.location,
name=self.name,
peering_location=self.peering_location,
provisioning_state=self.provisioning_state,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_peering(peering_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPeeringResult:
"""
Peering is a logical representation of a set of connections to the Microsoft Cloud Edge at a location.
:param str peering_name: The name of the peering.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['peeringName'] = peering_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:peering/v20200401:getPeering', __args__, opts=opts, typ=GetPeeringResult).value
return AwaitableGetPeeringResult(
direct=__ret__.direct,
exchange=__ret__.exchange,
id=__ret__.id,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
peering_location=__ret__.peering_location,
provisioning_state=__ret__.provisioning_state,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
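# --- Added usage sketch (illustrative, not part of the generated file) ------
# The names below are placeholders; the call simply mirrors the signature of
# get_peering() above and needs a configured Pulumi/Azure environment to run.
if __name__ == "__main__":
    peering = get_peering(peering_name="examplePeering",               # hypothetical
                          resource_group_name="exampleResourceGroup")  # hypothetical
    print(peering.kind, peering.location)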
| 33.507614
| 181
| 0.626875
|
79534294295df3eccf5c2d9a5606ce7bbac0dea8
| 389
|
py
|
Python
|
test/syste_test_IEEE13bus_timeSeries/IEEE13bus.py
|
JonasVil/dssdata
|
bbc6ce7509dec39143d5202f11acbf5e977e7c06
|
[
"MIT"
] | 4
|
2020-05-08T21:32:50.000Z
|
2022-01-14T08:23:39.000Z
|
test/syste_test_IEEE13bus_timeSeries/IEEE13bus.py
|
julioolvera1/dssdata
|
541bb4dc449c0035177ed09f00111ddf46d76b3d
|
[
"MIT"
] | 26
|
2020-04-06T17:11:31.000Z
|
2020-05-04T14:50:50.000Z
|
test/syste_test_IEEE13bus_timeSeries/IEEE13bus.py
|
julioolvera1/dssdata
|
541bb4dc449c0035177ed09f00111ddf46d76b3d
|
[
"MIT"
] | 12
|
2020-04-06T17:11:45.000Z
|
2020-05-01T13:37:47.000Z
|
import opendssdirect as dss
path = "test/syste_test_IEEE13bus_timeSeries/IEEE13Nodeckt.dss"
dss.run_command(f"Compile {path}")
dss.run_command("Set Voltagebases=[115, 4.16, .48]")
dss.run_command("calcv")
dss.run_command("set mode=daily stepsize=5m hour = 0")
dss.Solution.Solve()
dss.run_command("Show Voltages LN Nodes")
dss.run_command("Show Losses")
dss.run_command("Show Currents")
| 29.923077
| 63
| 0.773779
|
795342f75b3e105f6872c521ee94c552a752bb5c
| 1,621
|
py
|
Python
|
Python/zzz_training_challenge/UdemyPythonPro/Chapter10_Cython/2_CythonCode/benchmarks/test_clipping.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | 1
|
2022-03-02T07:16:30.000Z
|
2022-03-02T07:16:30.000Z
|
Python/zzz_training_challenge/UdemyPythonPro/Chapter10_Cython/2_CythonCode/benchmarks/test_clipping.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/UdemyPythonPro/Chapter10_Cython/2_CythonCode/benchmarks/test_clipping.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
'''Test code.
'''
import random
from timeit import Timer
import numpy as np
import fastvector
v = fastvector.VectorND([random.random() for _ in range(100_000)])
a = np.array([random.random() for _ in range(100_000)])
num_runs = 100
import_string = (
'''
from __main__ import v, a
import fastvector
import numpy as np
'''
)
python_timer = Timer(
'fastvector.python_clip_vector(v, -1, 1, v)',
setup=import_string
)
naive_cython_timer = Timer(
'fastvector.naive_cython_clip_vector(v, -1, 1, v)',
setup=import_string
)
cython_timer = Timer(
'fastvector.cython_clip_vector(v, -1, 1, v)',
setup=import_string
)
np_timer = Timer(
'np.clip(a, -1, 1, a)',
setup=import_string
)
def main():
python_mean_time = np.mean(python_timer.repeat(repeat=num_runs, number=1))
print(f'fastvector.python_clip_vector: {python_mean_time}')
naive_cython_mean_time = np.mean(naive_cython_timer.repeat(repeat=num_runs, number=1))
print(f'fastvector.naive_cython_clip_vector: {naive_cython_mean_time}')
cython_mean_time = np.mean(cython_timer.repeat(repeat=num_runs, number=1))
print(f'fastvector.cython_clip_vector: {cython_mean_time}')
np_mean_time = np.mean(np_timer.repeat(repeat=num_runs, number=1))
print(f'np.clip: {np_mean_time}')
print(f'execution time speedup to python: {round(python_mean_time / cython_mean_time, 1)}x')
print(f'execution time speedup to naive: {round(naive_cython_mean_time / cython_mean_time, 1)}x')
print(f'execution time speedup to numpy: {round(np_mean_time / cython_mean_time, 1)}x')
if __name__ == '__main__':
main()
| 27.016667
| 101
| 0.721777
|
795342f8422f69200ec25dc470335a2e84edeb59
| 10,482
|
py
|
Python
|
container_sdk/api/cluster/cluster_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
container_sdk/api/cluster/cluster_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
container_sdk/api/cluster/cluster_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import container_sdk.model.container.cluster_pb2
import container_sdk.api.cluster.delete_cluster_pb2
import google.protobuf.empty_pb2
import container_sdk.api.cluster.get_pb2
import container_sdk.api.cluster.import_resources_pb2
import container_sdk.api.cluster.list_pb2
import container_sdk.api.cluster.update_pb2
import container_sdk.utils.http_util
import google.protobuf.json_format
class ClusterClient(object):
def __init__(self, server_ip="", server_port=0, service_name="", host=""):
"""
        Initialize the client.
        :param server_ip: server IP for SDK requests; when empty, requests are routed through the naming service
        :param server_port: server port for SDK requests, used together with server_ip; when empty, requests are routed through the naming service
        :param service_name: service name for SDK requests; when empty, routing is done by contract name. If both server_ip and service_name are set, server_ip takes precedence
        :param host: host name of the target service, e.g. cmdb.easyops-only.com
"""
if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip and server_port must be specified together")
self._server_ip = server_ip
self._server_port = server_port
self._service_name = service_name
self._host = host
def create(self, request, org, user, timeout=10):
# type: (container_sdk.model.container.cluster_pb2.Cluster, int, str, int) -> container_sdk.model.container.cluster_pb2.Cluster
"""
        Create a Kubernetes (k8s) cluster.
        :param request: the create request
        :param org: the customer's org ID, a number
        :param user: the username used to call the API
        :param timeout: call timeout, in seconds
:return: container_sdk.model.container.cluster_pb2.Cluster
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.cluster.Create"
uri = "/api/container/v1/clusters"
requestParam = request
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = container_sdk.model.container.cluster_pb2.Cluster()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def delete_cluster(self, request, org, user, timeout=10):
# type: (container_sdk.api.cluster.delete_cluster_pb2.DeleteClusterRequest, int, str, int) -> google.protobuf.empty_pb2.Empty
"""
        Delete the specified Kubernetes (k8s) cluster.
        :param request: the delete_cluster request
        :param org: the customer's org ID, a number
        :param user: the username used to call the API
        :param timeout: call timeout, in seconds
:return: google.protobuf.empty_pb2.Empty
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.cluster.DeleteCluster"
uri = "/api/container/v1/clusters/{instanceId}".format(
instanceId=request.instanceId,
)
requestParam = request
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="DELETE",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = google.protobuf.empty_pb2.Empty()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
def get(self, request, org, user, timeout=10):
# type: (container_sdk.api.cluster.get_pb2.GetRequest, int, str, int) -> container_sdk.model.container.cluster_pb2.Cluster
"""
        Get details of a Kubernetes (k8s) cluster.
        :param request: the get request
        :param org: the customer's org ID, a number
        :param user: the username used to call the API
        :param timeout: call timeout, in seconds
:return: container_sdk.model.container.cluster_pb2.Cluster
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.cluster.Get"
uri = "/api/container/v1/clusters/{instanceId}".format(
instanceId=request.instanceId,
)
requestParam = request
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = container_sdk.model.container.cluster_pb2.Cluster()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def import_resources(self, request, org, user, timeout=10):
# type: (container_sdk.api.cluster.import_resources_pb2.ImportResourcesRequest, int, str, int) -> container_sdk.api.cluster.import_resources_pb2.ImportResourcesResponse
"""
        Import Kubernetes (k8s) cluster resources.
        :param request: the import_resources request
        :param org: the customer's org ID, a number
        :param user: the username used to call the API
        :param timeout: call timeout, in seconds
:return: container_sdk.api.cluster.import_resources_pb2.ImportResourcesResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.cluster.ImportResources"
uri = "/api/container/v1/clusters/{instanceId}/import".format(
instanceId=request.instanceId,
)
requestParam = request
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = container_sdk.api.cluster.import_resources_pb2.ImportResourcesResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def list(self, request, org, user, timeout=10):
# type: (container_sdk.api.cluster.list_pb2.ListRequest, int, str, int) -> container_sdk.api.cluster.list_pb2.ListResponse
"""
        List Kubernetes (k8s) clusters.
        :param request: the list request
        :param org: the customer's org ID, a number
        :param user: the username used to call the API
        :param timeout: call timeout, in seconds
:return: container_sdk.api.cluster.list_pb2.ListResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.cluster.List"
uri = "/api/container/v1/clusters"
requestParam = request
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = container_sdk.api.cluster.list_pb2.ListResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def update(self, request, org, user, timeout=10):
# type: (container_sdk.api.cluster.update_pb2.UpdateRequest, int, str, int) -> google.protobuf.empty_pb2.Empty
"""
        Update a Kubernetes (k8s) cluster.
        :param request: the update request
        :param org: the customer's org ID, a number
        :param user: the username used to call the API
        :param timeout: call timeout, in seconds
:return: google.protobuf.empty_pb2.Empty
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.cluster.Update"
uri = "/api/container/v1/clusters/{instanceId}".format(
instanceId=request.instanceId,
)
requestParam = request.cluster
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="PUT",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = google.protobuf.empty_pb2.Empty()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
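# --- Added usage sketch (illustrative, not part of the generated SDK) -------
# Server address, org number and username are placeholders; ListRequest comes
# from the list_pb2 module imported at the top of this file.
if __name__ == "__main__":
    client = ClusterClient(server_ip="192.168.0.1", server_port=8080)  # hypothetical endpoint
    list_request = container_sdk.api.cluster.list_pb2.ListRequest()
    print(client.list(list_request, org=1, user="demo_user"))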
| 36.778947
| 176
| 0.613623
|
7953439beb74850f7ebc720c50350daa5e788921
| 3,080
|
py
|
Python
|
source/_static/code/aiyagari/aiyagari_household.py
|
tuttugu-ryo/lecture-source-py
|
9ce84044c2cc421775ea63a004556d7ae3b4e504
|
[
"BSD-3-Clause"
] | 56
|
2017-05-09T10:45:23.000Z
|
2022-01-20T20:33:27.000Z
|
source/_static/code/aiyagari/aiyagari_household.py
|
tuttugu-ryo/lecture-source-py
|
9ce84044c2cc421775ea63a004556d7ae3b4e504
|
[
"BSD-3-Clause"
] | 7
|
2017-06-30T01:52:46.000Z
|
2019-05-01T20:09:47.000Z
|
source/_static/code/aiyagari/aiyagari_household.py
|
tuttugu-ryo/lecture-source-py
|
9ce84044c2cc421775ea63a004556d7ae3b4e504
|
[
"BSD-3-Clause"
] | 117
|
2017-04-25T16:09:17.000Z
|
2022-03-23T02:30:29.000Z
|
import numpy as np
from numba import jit
class Household:
"""
This class takes the parameters that define a household asset accumulation
problem and computes the corresponding reward and transition matrices R
and Q required to generate an instance of DiscreteDP, and thereby solve
for the optimal policy.
Comments on indexing: We need to enumerate the state space S as a sequence
S = {0, ..., n}. To this end, (a_i, z_i) index pairs are mapped to s_i
indices according to the rule
s_i = a_i * z_size + z_i
To invert this map, use
a_i = s_i // z_size (integer division)
z_i = s_i % z_size
"""
def __init__(self,
r=0.01, # interest rate
w=1.0, # wages
β=0.96, # discount factor
a_min=1e-10,
Π=[[0.9, 0.1], [0.1, 0.9]], # Markov chain
z_vals=[0.1, 1.0], # exogenous states
a_max=18,
a_size=200):
# Store values, set up grids over a and z
self.r, self.w, self.β = r, w, β
self.a_min, self.a_max, self.a_size = a_min, a_max, a_size
self.Π = np.asarray(Π)
self.z_vals = np.asarray(z_vals)
self.z_size = len(z_vals)
self.a_vals = np.linspace(a_min, a_max, a_size)
self.n = a_size * self.z_size
# Build the array Q
self.Q = np.zeros((self.n, a_size, self.n))
self.build_Q()
# Build the array R
self.R = np.empty((self.n, a_size))
self.build_R()
def set_prices(self, r, w):
"""
Use this method to reset prices. Calling the method will trigger a
re-build of R.
"""
self.r, self.w = r, w
self.build_R()
def build_Q(self):
populate_Q(self.Q, self.a_size, self.z_size, self.Π)
def build_R(self):
self.R.fill(-np.inf)
populate_R(self.R, self.a_size, self.z_size, self.a_vals, self.z_vals, self.r, self.w)
# Do the hard work using JIT-ed functions
@jit(nopython=True)
def populate_R(R, a_size, z_size, a_vals, z_vals, r, w):
n = a_size * z_size
for s_i in range(n):
a_i = s_i // z_size
z_i = s_i % z_size
a = a_vals[a_i]
z = z_vals[z_i]
for new_a_i in range(a_size):
a_new = a_vals[new_a_i]
c = w * z + (1 + r) * a - a_new
if c > 0:
R[s_i, new_a_i] = np.log(c) # Utility
@jit(nopython=True)
def populate_Q(Q, a_size, z_size, Π):
n = a_size * z_size
for s_i in range(n):
z_i = s_i % z_size
for a_i in range(a_size):
for next_z_i in range(z_size):
Q[s_i, a_i, a_i * z_size + next_z_i] = Π[z_i, next_z_i]
@jit(nopython=True)
def asset_marginal(s_probs, a_size, z_size):
a_probs = np.zeros(a_size)
for a_i in range(a_size):
for z_i in range(z_size):
a_probs[a_i] += s_probs[a_i * z_size + z_i]
return a_probs
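# --- Added worked example (illustrative, not part of the original file) -----
# Demonstrates the (a_i, z_i) <-> s_i index rule described in the Household
# docstring on a small grid; everything used here is defined above.
if __name__ == "__main__":
    hh = Household(a_size=10)                  # n = a_size * z_size = 20 states
    for s_i in (0, 1, 7, 19):
        a_i, z_i = s_i // hh.z_size, s_i % hh.z_size
        assert s_i == a_i * hh.z_size + z_i    # round-trip of the index rule
        print(s_i, '->', (a_i, z_i))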
| 29.902913
| 94
| 0.548052
|
79534459aecf4d6d2f516997d6a7ef7c3927b1ef
| 3,099
|
py
|
Python
|
eero/eero.py
|
nguyenmp/eero-client
|
96c5350dd42f9e3479478fb2905100085de6ca01
|
[
"MIT"
] | 111
|
2017-10-20T14:46:59.000Z
|
2022-03-17T12:40:51.000Z
|
eero/eero.py
|
nguyenmp/eero-client
|
96c5350dd42f9e3479478fb2905100085de6ca01
|
[
"MIT"
] | 13
|
2017-11-30T23:56:34.000Z
|
2021-09-12T03:36:58.000Z
|
eero/eero.py
|
nguyenmp/eero-client
|
96c5350dd42f9e3479478fb2905100085de6ca01
|
[
"MIT"
] | 32
|
2017-10-23T17:26:23.000Z
|
2022-02-24T02:57:56.000Z
|
from .client import Client
from .exception import ClientException
import re
class Eero(object):
def __init__(self, session):
# type(SessionStorage) -> ()
self.session = session
self.client = Client()
@property
def _cookie_dict(self):
if self.needs_login():
return dict()
else:
return dict(s=self.session.cookie)
def needs_login(self):
return self.session.cookie is None
def login(self, identifier):
# type(string) -> string
json = dict(login=identifier)
data = self.client.post('login', json=json)
return data['user_token']
def login_verify(self, verification_code, user_token):
json = dict(code=verification_code)
response = self.client.post('login/verify', json=json,
cookies=dict(s=user_token))
self.session.cookie = user_token
return response
def refreshed(self, func):
try:
return func()
except ClientException as exception:
if (exception.status == 401
and exception.error_message == 'error.session.refresh'):
self.login_refresh()
return func()
else:
raise
def login_refresh(self):
response = self.client.post('login/refresh', cookies=self._cookie_dict)
self.session.cookie = response['user_token']
def account(self):
return self.refreshed(lambda: self.client.get(
'account',
cookies=self._cookie_dict))
def id_from_url(self, id_or_url):
match = re.search('^[0-9]+$', id_or_url)
if match:
return match.group(0)
match = re.search(r'\/([0-9]+)$', id_or_url)
if match:
return match.group(1)
def networks(self, network_id):
return self.refreshed(lambda: self.client.get(
'networks/{}'.format(
self.id_from_url(network_id)),
cookies=self._cookie_dict))
def devices(self, network_id):
return self.refreshed(lambda: self.client.get(
'networks/{}/devices'.format(
self.id_from_url(network_id)),
cookies=self._cookie_dict))
def eeros(self, network_id):
return self.refreshed(lambda: self.client.get(
'networks/{}/eeros'.format(
self.id_from_url(network_id)),
cookies=self._cookie_dict))
def reboot(self, device_id):
return self.refreshed(lambda: self.client.post(
'eeros/{}/reboot'.format(
self.id_from_url(device_id)),
cookies=self._cookie_dict))
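# --- Added usage sketch (illustrative, not part of the original module) -----
# Assumes a session object that exposes a writable `cookie` attribute (None
# before login) and a reachable eero account; the identifier is a placeholder
# and the verification code is entered interactively.
if __name__ == '__main__':
    class MemorySession(object):
        cookie = None                          # hypothetical in-memory storage

    eero = Eero(MemorySession())
    user_token = eero.login('user@example.com')             # placeholder identifier
    eero.login_verify(input('verification code: '), user_token)
    print(eero.account())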
| 35.62069
| 79
| 0.50597
|
795345197a69ddb717def7e02babb9caf970c41a
| 16,196
|
py
|
Python
|
#4_Misfit_Analysis.py
|
mshodge/FaultScarpAlgorithm
|
25ddc9b063705ceb941c1bbe00ffe2ac1bb107cb
|
[
"MIT"
] | null | null | null |
#4_Misfit_Analysis.py
|
mshodge/FaultScarpAlgorithm
|
25ddc9b063705ceb941c1bbe00ffe2ac1bb107cb
|
[
"MIT"
] | 1
|
2021-01-05T09:04:09.000Z
|
2021-01-05T09:04:09.000Z
|
#4_Misfit_Analysis.py
|
mshodge/FaultScarpAlgorithm
|
25ddc9b063705ceb941c1bbe00ffe2ac1bb107cb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 14:02:54 2017 - v1.0 Finalised Fri Apr 13
@author: michaelhodge
"""
#A script to perform a misfit analysis between manual and algorithm methods
#to identify the best performing parameter space
#Loads packages required
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
from Algorithm_misfit import algorithm_misfit
#Creates blank variables
prof_height_subsample=np.zeros((num_subsample,nump))
prof_distance_subsample=np.zeros((num_subsample,nump))
# Creates subsample of data for analysis
n=-1
for i in range (0,num_profiles,subsample):
n=n+1
prof_height_subsample[n,:]=prof_height[i,:]
prof_distance_subsample[n,:]=prof_distance[i,:]
iterations=num_subsample
print ('Running Algorithm on Sub Sampled Catalog of size %d, Please Wait...' % (iterations))
#Run smoothing and misfit analysis between subsampled data set
#Choose minimum and maximum filter bin size (bin_min, bin_max) and step between (bin_step).
#Choose minimum and maximum slope threshold (theta_T_min, theta_T_max) and step between (theta_T_step)
#Choose derivative of slope threshold (phi_T)
bin_max = 40
bin_min = 9 #needs to be an odd integer
bin_step = 4 #needs to be an even integer
theta_T_max = 40 #insert positive integer here, turns to negative later
theta_T_min = 7 #insert positive integer here, turns to negative later
theta_T_step = 4 #insert positive integer here, turns to negative later
phi_T = 5
#---IMPORTANT---
#Choose two types of filter method to compare: 1 - None; 2 - Average;
#3 - Sav Gol; 4 - Median; 5 - Lowess
#Comment out filters not needed
#---ANALYSIS 1----
#method = 1 #No smoothing
#method_name_1 = 'None'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (No Filter), Please Wait...')
#
#method = 2 #Average smoothing
#method_name_1 = 'Average'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Average Filter), Please Wait...')
method = 3 #Sav Gol smoothing
method_name_1 = 'Sav Gol'
analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
print ('Finished Analysis (Savitzky-Golay Filter), Please Wait...')
#method = 4 #Median smoothing
#method_name_1 = 'Median'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Median Filter), Please Wait...')
#
#method = 5 #Lowess smoothing
#method_name_1 = 'Lowess'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Lowess Filter), Please Wait...')
analysis_number_1=method
#---IMPORTANT---
#---ANALYSIS 2----
#method = 1 #No smoothing
#method_name_2 = 'None'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (No Filter), Please Wait...')
#
#method = 2 #Average smoothing
#method_name_2 = 'Average'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Average Filter), Please Wait...')
#
#method = 3 #Sav Gol smoothing
#method_name_2 = 'Sav Gol'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Savitzky-Golay Filter), Please Wait...')
#
#method = 4 #Median smoothing
#method_name_2 = 'Median'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Median Filter), Please Wait...')
method = 5 #Lowess smoothing
method_name_2 = 'Lowess'
analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
print ('Finished Analysis (Lowess Filter), Please Wait...')
analysis_number_2=method
#Output values for ANALYSIS 1
h_1=analysis_1[0] #scarp height
w_1=analysis_1[1] #scarp width
slope_1=analysis_1[2] #scarp slope
misfit_height_1=analysis_1[3] #misfit height
misfit_width_1=analysis_1[4] #misfit width
misfit_slope_1=analysis_1[5] #misfit slope
misfit_height_average_1=analysis_1[6] #average misfit height
misfit_width_average_1=analysis_1[7] #average misfit width
misfit_slope_average_1=analysis_1[8] #average misfit slope
#Output values for ANALYSIS 2
h_2=analysis_2[0] #scarp height
w_2=analysis_2[1] #scarp width
slope_2=analysis_2[2] #scarp slope
misfit_height_2=analysis_2[3] #misfit height
misfit_width_2=analysis_2[4] #misfit width
misfit_slope_2=analysis_2[5] #misfit slope
misfit_height_average_2=analysis_2[6] #average misfit height
misfit_width_average_2=analysis_2[7] #average misfit width
misfit_slope_average_2=analysis_2[8] #average misfit slope
#Grid setup
gridx=analysis_1[9]
gridy=analysis_1[10]
#Dump save analysis
with open('Misfit_Analysis.pickle', 'wb') as f:
pickle.dump(h_1, f)
pickle.dump(h_2, f)
pickle.dump(w_1, f)
pickle.dump(w_2, f)
pickle.dump(slope_1, f)
pickle.dump(slope_2, f)
pickle.dump(misfit_height_1, f)
pickle.dump(misfit_height_2, f)
pickle.dump(misfit_width_1, f)
pickle.dump(misfit_width_2, f)
pickle.dump(misfit_slope_1, f)
pickle.dump(misfit_slope_2, f)
pickle.dump(misfit_height_average_1, f)
pickle.dump(misfit_height_average_2, f)
pickle.dump(misfit_width_average_1, f)
pickle.dump(misfit_width_average_2, f)
pickle.dump(misfit_slope_average_1, f)
pickle.dump(misfit_slope_average_2, f)
pickle.dump(gridx, f)
pickle.dump(gridy, f)
#Count the number of samples where scarp parameter was calculated
misfit_height_1_min=np.zeros((iterations,1))
misfit_height_2_min=np.zeros((iterations,1))
misfit_height_1_count=np.zeros((len(misfit_height_average_1[:,1]),(len(misfit_height_average_1[1,:]))))
misfit_height_2_count=np.zeros((len(misfit_height_average_2[:,1]),(len(misfit_height_average_2[1,:]))))
for i in range (0,iterations):
misfit_height_1_min[i]=np.ndarray.min(abs(misfit_height_1[i,:,:]))
misfit_height_2_min[i]=np.ndarray.min(abs(misfit_height_2[i,:,:]))
misfit_height_1_count_all=np.count_nonzero(~np.isnan(misfit_height_1_min))
misfit_height_2_count_all=np.count_nonzero(~np.isnan(misfit_height_2_min))
for m in range (0,(len(misfit_height_average_1[:,1]))):
for n in range (0,(len(misfit_height_average_1[1,:]))):
misfit_height_1_count[m,n]=np.count_nonzero(~np.isnan(misfit_height_1[:,m,n]))
for m in range (0,(len(misfit_height_average_1[:,1]))):
for n in range (0,(len(misfit_height_average_1[1,:]))):
misfit_height_2_count[m,n]=np.count_nonzero(~np.isnan(misfit_height_2[:,m,n]))
#Determining the best parameter space
value = 0.0
count_min=0.5 #Minimum number of successful profiles (normally 50% or 0.5)
A=(abs(misfit_height_average_1)+abs(misfit_width_average_1)+abs(misfit_slope_average_1))/(misfit_height_1_count/num_subsample)
where_are_NaNs = np.isnan(A)
A[where_are_NaNs] = 9999
where_less_than_mincount=misfit_height_1_count/num_subsample<count_min
A[where_less_than_mincount] = 9999
X_1 = np.abs(A-value)
idx_1 = np.where( X_1 == X_1.min() )
B=(abs(misfit_height_average_2)+abs(misfit_width_average_2)+abs(misfit_slope_average_2))/(misfit_height_2_count/num_subsample)
where_are_NaNs = np.isnan(B)
B[where_are_NaNs] = 9999
where_less_than_mincount=misfit_height_2_count/num_subsample<count_min
B[where_less_than_mincount] = 9999
X_2 = np.abs(B-value)
idx_2 = np.where( X_2 == X_2.min() )
#Prints out the best parameter space as 'Method name (i.e., Sav Gol,), average height misfit, average width misfit, average slope misfit, count, slope threshold, bin size'
if abs(A[idx_1[0], idx_1[1]])<abs(B[idx_2[0], idx_2[1]]):
print('Best Parameter Space:')
print('method = %s' %method_name_1)
print('bin size = %s' %gridy[idx_1[0], idx_1[1]])
print('slope threshold = %s' %gridx[idx_1[0], idx_1[1]])
print('average misfit height (m) = %s' %misfit_height_average_1[idx_1[0], idx_1[1]])
print('average misfit width (m) = %s' %misfit_width_average_1[idx_1[0], idx_1[1]])
print('average misfit slope (degrees) = %s' %misfit_slope_average_1[idx_1[0], idx_1[1]])
print('misfit count = %s' %misfit_height_1_count[idx_1[0], idx_1[1]])
method=analysis_number_1
    theta_T=int(gridx[idx_1[0], idx_1[1]])
    idx_theta=int(idx_1[1])
    bin_size=int(gridy[idx_1[0], idx_1[1]])
    idx_b=int(idx_1[0])
else:
print('Best Parameter Space:')
print('method = %s' %method_name_2)
print('bin size = %s' %gridy[idx_2[0], idx_2[1]])
print('slope threshold = %s' %gridx[idx_2[0], idx_2[1]])
print('average misfit height (m) = %s' %misfit_height_average_2[idx_2[0], idx_2[1]])
print('average misfit width (m) = %s' %misfit_width_average_2[idx_2[0], idx_2[1]])
print('average misfit slope (degrees) = %s' %misfit_slope_average_2[idx_2[0], idx_2[1]])
print('misfit count = %s' %misfit_height_2_count[idx_2[0], idx_2[1]])
method=analysis_number_2
    theta_T=int(gridx[idx_2[0], idx_2[1]])
    idx_theta=int(idx_2[1])
    bin_size=int(gridy[idx_2[0], idx_2[1]])
    idx_b=int(idx_2[0])
###
#Set levels for misfit plots
levels_height=[-10, -7.5, -5, -2.5, 0, 2.5, 5, 7.5, 10]
levels_width=[-20, -15, -5, 0, 5, 10, 15, 20]
levels_slope=[-40, -35, -30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40]
levels_count=[2,4,6,8,10,12,14,16,18,20]
#Plot figures
plt.figure(3)
#Plot for analysis number 1
plt.subplot(4,2,1)
plt.contourf(gridx,gridy,misfit_height_average_1,levels_height,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit height (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit height using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,3)
plt.contourf(gridx,gridy,misfit_width_average_1,levels_width,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit width (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit width using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,5)
plt.contourf(gridx,gridy,misfit_slope_average_1,levels_slope,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit slope ($^\circ$)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit slope using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,7)
cmap = plt.cm.get_cmap("winter")
plt.contourf(gridx,gridy,misfit_height_1_count,levels_count,cmap=cmap, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit count', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.xlabel('$\mathit{b}$ value (m)', fontsize=8)
plt.ylabel('${\\theta}_T$ ($^\circ$)', fontsize=8)
plt.title('Misfit count using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
#Plot for analysis number 2
plt.subplot(4,2,2)
plt.contourf(gridx,gridy,misfit_height_average_2,levels_height,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit height (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit height using %s filter' %method_name_2, fontsize=8)
#plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,4)
plt.contourf(gridx,gridy,misfit_width_average_2,levels_width,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit width (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit width using %s filter' %method_name_2, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,6)
plt.contourf(gridx,gridy,misfit_slope_average_2,levels_slope,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit slope ($^\circ$)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit slope using %s filter' %method_name_2, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,8)
cmap = plt.cm.get_cmap("winter")
plt.contourf(gridx,gridy,misfit_height_2_count,levels_count,cmap=cmap, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit count', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit count using %s filter' %method_name_2, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.tight_layout()
if method==analysis_number_1:
h_subsample=h_1[:,idx_b,idx_theta]
w_subsample=w_1[:,idx_b,idx_theta]
slope_subsample=slope_1[:,idx_b,idx_theta]
else:
h_subsample=h_2[:,idx_b,idx_theta]
w_subsample=w_2[:,idx_b,idx_theta]
slope_subsample=slope_2[:,idx_b,idx_theta]
#Plot against manual plot
plt.figure(4) #plot manual data
plt.subplot(3,1,1)
plt.scatter(dist_along_fault,h_manual,s=5,color='black')
plt.scatter(dist_along_fault,h_subsample,s=5,color='red')
plt.ylabel('Scarp Height (m)', fontsize=8)
plt.title('Manual (black) v Algorithm (red) Scarp Height Profile', fontsize=8)
#plt.ylim([0, np.int(math.ceil(np.amax(h_manual)/10.0))*10])
plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(3,1,2)
plt.scatter(dist_along_fault,w_manual,s=5,color='black')
plt.scatter(dist_along_fault,w_subsample,s=5,color='red')
plt.ylabel('Scarp Width (m)', fontsize=8)
plt.title('Manual (black) v Algorithm (red) Scarp Width Profile', fontsize=8)
#plt.ylim([0, np.int(math.ceil(np.amax(w_manual)/10.0))*10])
plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(3,1,3)
plt.scatter(dist_along_fault,slope_manual,s=5,color='black')
plt.scatter(dist_along_fault,slope_subsample,s=5,color='red')
plt.xlabel('Distance along fault (km)', fontsize=8)
plt.ylabel('Scarp Slope ($^\circ$)', fontsize=8)
plt.title('Manual (black) v Algorithm (red) Scarp Slope Profile', fontsize=8)
plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
#plt.ylim([(np.int(math.ceil(np.amin(slope_manual)/10.0))*10)-10,0])
### END
| 39.502439
| 197
| 0.756421
|
79534529aa317c5d0f92f6d91d078b8adba9eaa6
| 17,746
|
py
|
Python
|
airspider/spider.py
|
AirSpiders/AirSpider
|
a56e4b1c640e19113b2b078c9a8e7f3a02b2f721
|
[
"Apache-2.0"
] | 21
|
2020-03-20T09:01:24.000Z
|
2021-06-30T02:00:56.000Z
|
airspider/spider.py
|
LRENZ/AirSpider
|
a56e4b1c640e19113b2b078c9a8e7f3a02b2f721
|
[
"Apache-2.0"
] | null | null | null |
airspider/spider.py
|
LRENZ/AirSpider
|
a56e4b1c640e19113b2b078c9a8e7f3a02b2f721
|
[
"Apache-2.0"
] | 6
|
2020-03-30T09:24:22.000Z
|
2020-10-30T16:45:02.000Z
|
#!/usr/bin/env python
import asyncio
import collections
import typing
import weakref
from datetime import datetime
from functools import reduce
from inspect import isawaitable
from signal import SIGINT, SIGTERM
from types import AsyncGeneratorType
from aiohttp import ClientSession
from airspider.exceptions import (
InvalidCallbackResult,
NotImplementedParseError,
NothingMatchedError,
)
from airspider.item import Item
from airspider.exceptions import SpiderHookError
from airspider.middleware import Middleware
from airspider.request import Request
from airspider.response import Response
from airspider.utils import get_logger
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class SpiderHook:
"""
    SpiderHook is used to extend the spider
"""
callback_result_map: dict = None
async def _run_spider_hook(self, hook_func):
"""
        Run hook before/after the spider starts crawling
        :param hook_func: awaitable (or plain callable) hook function
:return:
"""
if callable(hook_func):
try:
aws_hook_func = hook_func(weakref.proxy(self))
if isawaitable(aws_hook_func):
await aws_hook_func
except Exception as e:
raise SpiderHookError(f"<Hook {hook_func.__name__}: {e}")
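    # Hedged usage sketch (not from the original source): _run_spider_hook accepts any
    # callable, passes it a weak proxy of the spider, and awaits the result when it is
    # awaitable. An after_start/before_stop hook could therefore be written as the
    # illustrative coroutine below:
    #
    #     async def log_startup(spider):
    #         spider.logger.info("after_start hook running")
    #
    #     # Spider.start(after_start=log_startup)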
async def process_failed_response(self, request, response):
"""
Corresponding processing for the failed response
:param request: Request
:param response: Response
:return:
"""
pass
async def process_succeed_response(self, request, response):
"""
        Corresponding processing for a successful response
:param request: Request
:param response: Response
:return:
"""
pass
async def process_item(self, item):
"""
Corresponding processing for the Item type
:param item: Item
:return:
"""
pass
async def process_callback_result(self, callback_result):
"""
Corresponding processing for the invalid callback result
:param callback_result: Custom instance
:return:
"""
callback_result_name = type(callback_result).__name__
process_func_name = self.callback_result_map.get(callback_result_name, "")
process_func = getattr(self, process_func_name, None)
if process_func is not None:
await process_func(callback_result)
else:
raise InvalidCallbackResult(
f"<Parse invalid callback result type: {callback_result_name}>"
)
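# Hedged sketch (illustrative, not part of the original module): process_callback_result
# dispatches on the result's type name via callback_result_map, so a subclass can route a
# custom result object to its own handler. DownloadStats and process_stats are assumed names:
#
#     class DownloadStats:
#         def __init__(self, pages):
#             self.pages = pages
#
#     class StatsSpider(Spider):
#         start_urls = ["https://example.com"]
#         callback_result_map = {"DownloadStats": "process_stats"}
#
#         async def parse(self, response):
#             yield DownloadStats(pages=1)
#
#         async def process_stats(self, stats):
#             self.logger.info(f"Crawled {stats.pages} page(s)")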
class Spider(SpiderHook):
"""
    Spider schedules requests and dispatches their callbacks; subclass it to build a crawler
"""
name = "AirSpider"
request_config = None
    # Default values merged into each request object (see the request() method).
headers: dict = None
metadata: dict = None
aiohttp_kwargs: dict = None
# Some fields for statistics
failed_counts: int = 0
success_counts: int = 0
# Concurrency control
worker_numbers: int = 2
concurrency: int = 3
# Spider entry
start_urls: list = None
# A queue to save coroutines
worker_tasks: list = []
def __init__(
self,
middleware: typing.Union[typing.Iterable, Middleware] = None,
loop=None,
is_async_start: bool = False,
cancel_tasks: bool = True,
**spider_kwargs,
):
"""
Init spider object.
:param middleware: a list of or a single Middleware
        :param loop: asyncio event loop
        :param is_async_start: whether the spider is started from an already running event loop
        :param spider_kwargs: additional keyword arguments passed to the spider
"""
        if not self.start_urls or not isinstance(self.start_urls, collections.abc.Iterable):
raise ValueError(
"AirSpider spider must have a param named start_urls, eg: start_urls = ['https://www.github.com']"
)
self.loop = loop
asyncio.set_event_loop(self.loop)
# Init object-level properties
self.callback_result_map = self.callback_result_map or {}
self.request_config = self.request_config or {}
self.headers = self.headers or {}
self.metadata = self.metadata or {}
self.aiohttp_kwargs = self.aiohttp_kwargs or {}
self.spider_kwargs = spider_kwargs
self.request_session = ClientSession()
self.cancel_tasks = cancel_tasks
self.is_async_start = is_async_start
# set logger
self.logger = get_logger(name=self.name)
# customize middleware
if isinstance(middleware, list):
self.middleware = reduce(lambda x, y: x + y, middleware)
else:
self.middleware = middleware or Middleware()
# async queue as a producer
self.request_queue = asyncio.Queue()
# semaphore, used for concurrency control
self.sem = asyncio.Semaphore(self.concurrency)
async def _cancel_tasks(self):
tasks = []
        for task in asyncio.all_tasks():
            if task is not asyncio.current_task():
tasks.append(task)
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
async def _process_async_callback(
self, callback_results: AsyncGeneratorType, response: Response = None
):
try:
async for callback_result in callback_results:
if isinstance(callback_result, AsyncGeneratorType):
await self._process_async_callback(callback_result)
elif isinstance(callback_result, Request):
self.request_queue.put_nowait(
self.handle_request(request=callback_result)
)
elif isinstance(callback_result, typing.Coroutine):
self.request_queue.put_nowait(
self.handle_callback(
aws_callback=callback_result, response=response
)
)
elif isinstance(callback_result, Item):
# Process target item
await self.process_item(callback_result)
else:
await self.process_callback_result(callback_result=callback_result)
except NothingMatchedError as e:
error_info = f"<Field: {str(e).lower()}" + f", error url: {response.url}>"
self.logger.error(error_info)
except Exception as e:
self.logger.error(e)
async def _process_response(self, request: Request, response: Response):
if response:
if response.ok:
# Process succeed response
self.success_counts += 1
await self.process_succeed_response(request, response)
else:
# Process failed response
self.failed_counts += 1
await self.process_failed_response(request, response)
async def _run_request_middleware(self, request: Request):
if self.middleware.request_middleware:
for middleware in self.middleware.request_middleware:
if callable(middleware):
try:
aws_middleware_func = middleware(self, request)
if isawaitable(aws_middleware_func):
await aws_middleware_func
else:
self.logger.error(
f"<Middleware {middleware.__name__}: must be a coroutine function"
)
except Exception as e:
self.logger.error(f"<Middleware {middleware.__name__}: {e}")
async def _run_response_middleware(self, request: Request, response: Response):
if self.middleware.response_middleware:
for middleware in self.middleware.response_middleware:
if callable(middleware):
try:
aws_middleware_func = middleware(self, request, response)
if isawaitable(aws_middleware_func):
await aws_middleware_func
else:
self.logger.error(
f"<Middleware {middleware.__name__}: must be a coroutine function"
)
except Exception as e:
self.logger.error(f"<Middleware {middleware.__name__}: {e}")
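    # Hedged sketch (assumption): each request middleware is invoked as
    # middleware(spider, request) and each response middleware as
    # middleware(spider, request, response). How such coroutines get registered on the
    # Middleware object (e.g. a decorator) is defined elsewhere and not shown here, and
    # request.headers below assumes Request exposes the headers dict it was built with.
    #
    #     async def add_user_agent(spider, request):
    #         request.headers.setdefault("User-Agent", spider.name)
    #
    #     async def log_response(spider, request, response):
    #         spider.logger.info(f"Fetched {request.url}")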
async def _start(self, after_start=None, before_stop=None):
self.logger.info("Spider started!")
start_time = datetime.now()
# Add signal
for signal in (SIGINT, SIGTERM):
try:
self.loop.add_signal_handler(
signal, lambda: asyncio.ensure_future(self.stop(signal))
)
except NotImplementedError:
self.logger.warning(
f"{self.name} tried to use loop.add_signal_handler "
"but it is not implemented on this platform."
)
# Run hook before spider start crawling
await self._run_spider_hook(after_start)
# Actually run crawling
try:
await self.start_master()
finally:
# Run hook after spider finished crawling
await self._run_spider_hook(before_stop)
await self.request_session.close()
# Display logs about this crawl task
end_time = datetime.now()
self.logger.info(
f"Total requests: {self.failed_counts + self.success_counts}"
)
if self.failed_counts:
self.logger.info(f"Failed requests: {self.failed_counts}")
self.logger.info(f"Time usage: {end_time - start_time}")
self.logger.info("Spider finished!")
@classmethod
async def async_start(
cls,
middleware: typing.Union[typing.Iterable, Middleware] = None,
loop=None,
after_start=None,
before_stop=None,
cancel_tasks: bool = True,
**spider_kwargs,
):
"""
Start an async spider
:param middleware: customize middleware or a list of middleware
:param loop:
:param after_start: hook
:param before_stop: hook
:param cancel_tasks: cancel async tasks
:param spider_kwargs: Additional keyword args to initialize spider
:return: An instance of :cls:`Spider`
"""
loop = loop or asyncio.get_event_loop()
spider_ins = cls(
middleware=middleware,
loop=loop,
is_async_start=True,
cancel_tasks=cancel_tasks,
**spider_kwargs,
)
await spider_ins._start(after_start=after_start, before_stop=before_stop)
return spider_ins
@classmethod
def start(
cls,
middleware: typing.Union[typing.Iterable, Middleware] = None,
loop=None,
after_start=None,
before_stop=None,
close_event_loop=True,
**spider_kwargs,
):
"""
Start a spider
:param after_start: hook
:param before_stop: hook
:param middleware: customize middleware or a list of middleware
:param loop: event loop
:param close_event_loop: bool
:param spider_kwargs: Additional keyword args to initialize spider
:return: An instance of :cls:`Spider`
"""
loop = loop or asyncio.new_event_loop()
spider_ins = cls(middleware=middleware, loop=loop, **spider_kwargs)
# Actually start crawling
spider_ins.loop.run_until_complete(
spider_ins._start(after_start=after_start, before_stop=before_stop)
)
spider_ins.loop.run_until_complete(spider_ins.loop.shutdown_asyncgens())
if close_event_loop:
spider_ins.loop.close()
return spider_ins
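    # Hedged usage note: start() owns/blocks an event loop until the crawl finishes,
    # while async_start() is awaited from a loop that is already running. Illustrative
    # calls (MySpider is an assumed subclass):
    #
    #     MySpider.start()                                            # blocking entry point
    #     # spider = await MySpider.async_start(cancel_tasks=False)   # inside a running loop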
async def handle_callback(self, aws_callback: typing.Coroutine, response):
"""Process coroutine callback function"""
callback_result = None
try:
callback_result = await aws_callback
except NothingMatchedError as e:
self.logger.error(f"<Item: {str(e).lower()}>")
except Exception as e:
self.logger.error(f"<Callback[{aws_callback.__name__}]: {e}")
return callback_result, response
async def handle_request(
self, request: Request
) -> typing.Tuple[AsyncGeneratorType, Response]:
"""
Wrap request with middleware.
:param request:
:return:
"""
callback_result, response = None, None
try:
await self._run_request_middleware(request)
callback_result, response = await request.fetch_callback(self.sem)
await self._run_response_middleware(request, response)
await self._process_response(request=request, response=response)
except NotImplementedParseError as e:
self.logger.error(e)
except NothingMatchedError as e:
error_info = f"<Field: {str(e).lower()}" + f", error url: {request.url}>"
self.logger.error(error_info)
except Exception as e:
self.logger.error(f"<Callback[{request.callback.__name__}]: {e}")
return callback_result, response
async def multiple_request(self, urls, is_gather=False, **kwargs):
"""For crawling multiple urls"""
if is_gather:
resp_results = await asyncio.gather(
*[self.handle_request(self.request(url=url, **kwargs)) for url in urls],
return_exceptions=True,
)
for index, task_result in enumerate(resp_results):
if not isinstance(task_result, RuntimeError) and task_result:
_, response = task_result
response.index = index
yield response
else:
for index, url in enumerate(urls):
_, response = await self.handle_request(self.request(url=url, **kwargs))
response.index = index
yield response
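    # Hedged usage sketch: multiple_request is an async generator yielding Response
    # objects tagged with .index, so a callback can fan out over several URLs
    # (the URLs below are placeholders):
    #
    #     async def parse(self, response):
    #         urls = ["https://example.com/page/1", "https://example.com/page/2"]
    #         async for resp in self.multiple_request(urls, is_gather=True):
    #             self.logger.info(f"Response {resp.index} from {resp.url}")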
async def parse(self, response):
"""
        Meant to be overridden by subclasses to parse the responses corresponding to start_urls
:param response: Response
:return:
"""
raise NotImplementedParseError("<!!! parse function is expected !!!>")
async def process_start_urls(self):
"""
Process the start URLs
        :return: An async iterator
"""
for url in self.start_urls:
yield self.request(url=url, callback=self.parse, metadata=self.metadata)
def request(
self,
url: str,
method: str = "GET",
*,
callback=None,
encoding: typing.Optional[str] = None,
headers: dict = None,
metadata: dict = None,
request_config: dict = None,
request_session=None,
**aiohttp_kwargs,
):
"""Init a Request class for crawling html"""
headers = headers or {}
metadata = metadata or {}
request_config = request_config or {}
request_session = request_session or self.request_session
headers.update(self.headers.copy())
request_config.update(self.request_config.copy())
aiohttp_kwargs.update(self.aiohttp_kwargs.copy())
return Request(
url=url,
method=method,
callback=callback,
encoding=encoding,
headers=headers,
metadata=metadata,
request_config=request_config,
request_session=request_session,
**aiohttp_kwargs,
)
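    # Hedged sketch: any Request yielded by a callback is re-queued by
    # _process_async_callback, so follow-up pages can be chained through self.request.
    # parse_detail and the URL are illustrative assumptions:
    #
    #     async def parse(self, response):
    #         yield self.request(url="https://example.com/detail", callback=self.parse_detail)
    #
    #     async def parse_detail(self, response):
    #         self.logger.info(f"Detail page: {response.url}")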
async def start_master(self):
"""Actually start crawling."""
async for request_ins in self.process_start_urls():
self.request_queue.put_nowait(self.handle_request(request_ins))
workers = [
asyncio.ensure_future(self.start_worker())
for i in range(self.worker_numbers)
]
for worker in workers:
self.logger.info(f"Worker started: {id(worker)}")
await self.request_queue.join()
if not self.is_async_start:
await self.stop(SIGINT)
else:
if self.cancel_tasks:
await self._cancel_tasks()
async def start_worker(self):
while True:
request_item = await self.request_queue.get()
self.worker_tasks.append(request_item)
if self.request_queue.empty():
results = await asyncio.gather(
*self.worker_tasks, return_exceptions=True
)
for task_result in results:
if not isinstance(task_result, RuntimeError) and task_result:
callback_results, response = task_result
if isinstance(callback_results, AsyncGeneratorType):
await self._process_async_callback(
callback_results, response
)
self.worker_tasks = []
self.request_queue.task_done()
async def stop(self, _signal):
"""
Finish all running tasks, cancel remaining tasks, then stop loop.
:param _signal:
:return:
"""
self.logger.info(f"Stopping spider: {self.name}")
await self._cancel_tasks()
self.loop.stop()
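# Hedged, minimal usage sketch appended for illustration; it is not part of the original
# module. The start URL is a placeholder assumption, and parse() only logs response.url,
# the one Response attribute this file itself already relies on.
if __name__ == "__main__":
    class DemoSpider(Spider):
        start_urls = ["https://httpbin.org/get"]  # placeholder URL (assumption)
        async def parse(self, response):
            # A real spider would extract items or yield follow-up requests here.
            self.logger.info(f"Fetched: {response.url}")
    DemoSpider.start()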
| 34.86444
| 114
| 0.594162
|
7953453359fb27d68a7c993da18092d61f2e0702
| 733
|
py
|
Python
|
bookstore/books/signals.py
|
M0673N/bookstore
|
ec9477550ba46f9ffde3817cf676e97b0239263d
|
[
"MIT"
] | null | null | null |
bookstore/books/signals.py
|
M0673N/bookstore
|
ec9477550ba46f9ffde3817cf676e97b0239263d
|
[
"MIT"
] | null | null | null |
bookstore/books/signals.py
|
M0673N/bookstore
|
ec9477550ba46f9ffde3817cf676e97b0239263d
|
[
"MIT"
] | null | null | null |
from bookstore.books.models import Book
import cloudinary.uploader
from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver
@receiver(pre_save, sender=Book)
def delete_old_book_image_on_change(sender, instance, **kwargs):
try:
old_image = sender.objects.get(pk=instance.pk).image
if old_image:
new_image = instance.image
if not old_image == new_image:
cloudinary.uploader.destroy(old_image.public_id)
except instance.DoesNotExist:
return
@receiver(pre_delete, sender=Book)
def image_delete_on_book_delete(sender, instance, **kwargs):
if instance.image:
cloudinary.uploader.destroy(instance.image.public_id)
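# Hedged note (assumption, not shown in this file): @receiver registers these handlers
# only when this module is imported, which Django apps usually do from AppConfig.ready().
# The config class and module path below are illustrative:
#
#     # bookstore/books/apps.py
#     from django.apps import AppConfig
#
#     class BooksConfig(AppConfig):
#         name = 'bookstore.books'
#
#         def ready(self):
#             from . import signals  # noqa: F401  -- connects the receivers above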
| 31.869565
| 64
| 0.729877
|