[Dataset viewer column header: hexsha (string, 40), size (int64, 4 to 1.02M), ext (8 classes), lang (1 class), max_stars_repo_path (string, 4 to 209), max_stars_repo_name (string, 5 to 121), max_stars_repo_head_hexsha (string, 40), max_stars_repo_licenses (list, 1 to 10), max_stars_count (int64, 1 to 191k), max_stars_repo_stars_event_min/max_datetime, max_issues_repo_path, max_issues_repo_name, max_issues_repo_head_hexsha, max_issues_repo_licenses, max_issues_count (int64, 1 to 67k), max_issues_repo_issues_event_min/max_datetime, max_forks_repo_path, max_forks_repo_name, max_forks_repo_head_hexsha, max_forks_repo_licenses, max_forks_count (int64, 1 to 105k), max_forks_repo_forks_event_min/max_datetime, content (string, 4 to 1.02M), avg_line_length (float64, 1.07 to 66.1k), max_line_length (int64, 4 to 266k), alphanum_fraction (float64, 0.01 to 1)]
[Row 1 metadata: hexsha 6b1d964e26ac4a5763f230df2788c94fa9547e51, size 69,766, ext py, lang Python, path salt/modules/bigip.py, repo jubrad/salt @ 7960334fb726cfde45e6409da79a65535c626685, licenses ["Apache-2.0"], max_stars_count 1 (2020-01-02T09:03:21.000Z to 2020-01-02T09:03:21.000Z), max_issues_count null, max_forks_count 1 (2020-01-02T09:03:24.000Z to 2020-01-02T09:03:24.000Z). Content follows.]
# -*- coding: utf-8 -*-
'''
An execution module which can manipulate an f5 bigip via iControl REST
:maturity: develop
:platform: f5_bigip_11.6
'''
# Import python libs
from __future__ import absolute_import
import json
import logging as logger
# Import third party libs
try:
import requests
import requests.exceptions
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Import 3rd-party libs
from salt.ext import six
# Import salt libs
import salt.utils
import salt.output
import salt.exceptions
# Setup the logger
log = logger.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'bigip'
def __virtual__():
'''
Only return if requests is installed
'''
if HAS_LIBS:
return __virtualname__
return (False, 'The bigip execution module cannot be loaded: '
'python requests library not available.')
BIG_IP_URL_BASE = 'https://{host}/mgmt/tm'
def _build_session(username, password, trans_label=None):
'''
Create a session to be used when connecting to iControl REST.
'''
bigip = requests.session()
bigip.auth = (username, password)
bigip.verify = False
bigip.headers.update({'Content-Type': 'application/json'})
if trans_label:
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=trans_label))
if trans_id:
bigip.headers.update({'X-F5-REST-Coordination-Id': trans_id})
else:
bigip.headers.update({'X-F5-REST-Coordination-Id': None})
return bigip
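# Illustrative sketch (not part of the upstream module): _build_session returns a
# requests session preloaded with basic auth and a JSON Content-Type header. When a
# trans_label is supplied and the grain bigip_f5_trans:<label> holds a transaction id,
# every request made with the session also carries the X-F5-REST-Coordination-Id header.
# The label and host below are hypothetical.
#
#   bigip = _build_session('admin', 'admin', trans_label='my_transaction')
#   bigip.get(BIG_IP_URL_BASE.format(host='bigip') + '/ltm/node')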
def _load_response(response):
'''
Load the response from json data, return the dictionary or raw text
'''
try:
data = json.loads(response.text)
except ValueError:
data = response.text
ret = {'code': response.status_code, 'content': data}
return ret
def _load_connection_error(hostname, error):
'''
Format and Return a connection error
'''
ret = {'code': None, 'content': 'Error: Unable to connect to the bigip device: {host}\n{error}'.format(host=hostname, error=error)}
return ret
def _loop_payload(params):
'''
    Pass in a dictionary of parameters, loop through them, and build a payload
    containing the parameters whose values are not None.
'''
#construct the payload
payload = {}
#set the payload
for param, value in six.iteritems(params):
if value is not None:
payload[param] = value
return payload
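# Illustrative sketch (not part of the upstream module): parameters whose value is None
# are dropped, everything else is passed through unchanged.
#
#   _loop_payload({'ratio': 2, 'description': None, 'logging': 'enabled'})
#       -> {'ratio': 2, 'logging': 'enabled'}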
def _build_list(option_value, item_kind):
'''
    Pass in an option to check for a list of items and create a list of dictionaries to set
    for this option.
'''
#specify profiles if provided
if option_value is not None:
items = []
#if user specified none, return an empty list
if option_value == 'none':
return items
#was a list already passed in?
if not isinstance(option_value, list):
values = option_value.split(',')
else:
values = option_value
for value in values:
# sometimes the bigip just likes a plain ol list of items
if item_kind is None:
items.append(value)
# other times it's picky and likes key value pairs...
else:
items.append({'kind': item_kind, 'name': value})
return items
return None
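# Illustrative sketch (not part of the upstream module): _build_list turns the comma
# delimited strings accepted on the CLI into the shapes iControl REST expects.
#
#   _build_list('http,tcp', 'ltm:virtual:profile')
#       -> [{'kind': 'ltm:virtual:profile', 'name': 'http'},
#           {'kind': 'ltm:virtual:profile', 'name': 'tcp'}]
#   _build_list('rule_one,rule_two', None)      -> ['rule_one', 'rule_two']
#   _build_list('none', 'ltm:virtual:profile')  -> []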
def _determine_toggles(payload, toggles):
'''
BigIP can't make up its mind if it likes yes / no or true or false.
Figure out what it likes to hear without confusing the user.
'''
for toggle, definition in six.iteritems(toggles):
#did the user specify anything?
if definition['value'] is not None:
#test for yes_no toggle
if (definition['value'] is True or definition['value'] == 'yes') and definition['type'] == 'yes_no':
payload[toggle] = 'yes'
elif (definition['value'] is False or definition['value'] == 'no') and definition['type'] == 'yes_no':
payload[toggle] = 'no'
#test for true_false toggle
if (definition['value'] is True or definition['value'] == 'yes') and definition['type'] == 'true_false':
payload[toggle] = True
elif (definition['value'] is False or definition['value'] == 'no') and definition['type'] == 'true_false':
payload[toggle] = False
return payload
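# Illustrative sketch (not part of the upstream module): given toggle definitions like
# the ones built in create_pool/create_virtual below, user input is normalised to the
# form the REST endpoint expects.
#
#   toggles = {'allow-nat': {'type': 'yes_no', 'value': True},
#              'dhcp-relay': {'type': 'true_false', 'value': 'yes'}}
#   _determine_toggles({}, toggles)  ->  {'allow-nat': 'yes', 'dhcp-relay': True}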
def _set_value(value):
'''
    A function to detect whether the user is trying to pass a dictionary or a list, parse it,
    and return a dictionary, a list, or a string.
'''
    log.debug(value)
#don't continue if already an acceptable data-type
if isinstance(value, bool) or isinstance(value, dict) or isinstance(value, list):
return value
#check if json
if value.startswith('j{') and value.endswith('}j'):
value = value.replace('j{', '{')
value = value.replace('}j', '}')
try:
return json.loads(value)
except Exception:
raise salt.exceptions.CommandExecutionError
#detect list of dictionaries
if '|' in value and r'\|' not in value:
values = value.split('|')
items = []
for value in values:
items.append(_set_value(value))
return items
#parse out dictionary if detected
if ':' in value and r'\:' not in value:
options = {}
#split out pairs
key_pairs = value.split(',')
for key_pair in key_pairs:
k = key_pair.split(':')[0]
v = key_pair.split(':')[1]
options[k] = v
return options
#try making a list
elif ',' in value and r'\,' not in value:
value_items = value.split(',')
return value_items
#just return a string
else:
#remove escape chars if added
if r'\|' in value:
value = value.replace(r'\|', '|')
if r'\:' in value:
value = value.replace(r'\:', ':')
if r'\,' in value:
value = value.replace(r'\,', ',')
return value
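# Illustrative sketch (not part of the upstream module): how _set_value parses the
# shorthand documented in create_profile/modify_profile. All inputs are hypothetical.
#
#   _set_value('item1,item2,item3')         -> ['item1', 'item2', 'item3']
#   _set_value('key-1:val-1,key-2:val-2')   -> {'key-1': 'val-1', 'key-2': 'val-2'}
#   _set_value('k1:v1|k2:v2')               -> [{'k1': 'v1'}, {'k2': 'v2'}]
#   _set_value(r'DEFAULT\:!SSLv3')          -> 'DEFAULT:!SSLv3'
#   _set_value('j{"default": {"cert": "default.crt"}}j')
#       -> {'default': {'cert': 'default.crt'}}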
def start_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and start a new transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
The name / alias for this transaction. The actual transaction
id will be stored within a grain called ``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.start_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
payload = {}
#post to REST to get trans id
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/transaction', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
#extract the trans_id
data = _load_response(response)
if data['code'] == 200:
trans_id = data['content']['transId']
__salt__['grains.setval']('bigip_f5_trans', {label: trans_id})
return 'Transaction: {trans_id} - has successfully been stored in the grain: bigip_f5_trans:{label}'.format(trans_id=trans_id,
label=label)
else:
return data
def list_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and list an existing transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
the label of this transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.list_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
if trans_id:
        #get from REST to list the transaction's commands
try:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}/commands'.format(trans_id=trans_id))
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
' bigip.start_transaction function'
def commit_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and commit an existing transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
the label of this transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.commit_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
if trans_id:
payload = {}
payload['state'] = 'VALIDATING'
        #patch to REST to commit the transaction
try:
response = bigip_session.patch(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}'.format(trans_id=trans_id), data=json.dumps(payload))
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
' bigip.start_transaction function'
def delete_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and delete an existing transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
The label of this transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.delete_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
if trans_id:
        #delete to REST to remove the transaction
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}'.format(trans_id=trans_id))
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
' bigip.start_transaction function'
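# Illustrative workflow sketch (not part of the upstream module): the transaction helpers
# above are meant to be chained from the CLI; the label is resolved to an
# X-F5-REST-Coordination-Id header by _build_session for any function that accepts
# trans_label. Host, credentials and names below are hypothetical.
#
#   salt '*' bigip.start_transaction bigip admin admin my_transaction
#   salt '*' bigip.create_node bigip admin admin my-node 10.1.1.2 trans_label=my_transaction
#   salt '*' bigip.commit_transaction bigip admin admin my_transaction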
def list_node(hostname, username, password, name=None, trans_label=None):
'''
A function to connect to a bigip device and list all nodes or a specific node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
        The name of the node to list. If no name is specified then all nodes
will be listed.
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.list_node bigip admin admin my-node
'''
#build sessions
bigip_session = _build_session(username, password, trans_label)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node')
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_node(hostname, username, password, name, address, trans_label=None):
'''
A function to connect to a bigip device and create a node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node
address
The address of the node
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
        salt '*' bigip.create_node bigip admin admin my-node 10.1.1.2
'''
#build session
bigip_session = _build_session(username, password, trans_label)
#construct the payload
payload = {}
payload['name'] = name
payload['address'] = address
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_node(hostname, username, password, name,
connection_limit=None,
description=None,
dynamic_ratio=None,
logging=None,
monitor=None,
rate_limit=None,
ratio=None,
session=None,
state=None,
trans_label=None):
'''
A function to connect to a bigip device and modify an existing node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to modify
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
logging
[enabled | disabled]
monitor
[[name] | none | default]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
state
[user-down | user-up ]
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.modify_node bigip admin admin 10.1.1.2 ratio=2 logging=enabled
'''
params = {
'connection-limit': connection_limit,
'description': description,
'dynamic-ratio': dynamic_ratio,
'logging': logging,
'monitor': monitor,
'rate-limit': rate_limit,
'ratio': ratio,
'session': session,
'state': state,
}
#build session
bigip_session = _build_session(username, password, trans_label)
#build payload
payload = _loop_payload(params)
payload['name'] = name
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_node(hostname, username, password, name, trans_label=None):
'''
A function to connect to a bigip device and delete a specific node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node which will be deleted.
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.delete_node bigip admin admin my-node
'''
#build session
bigip_session = _build_session(username, password, trans_label)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
    response_data = _load_response(response)
    if response_data['content'] == '':
        return True
    return response_data
def list_pool(hostname, username, password, name=None):
'''
A function to connect to a bigip device and list all pools or a specific pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to list. If no name is specified then all pools
will be listed.
CLI Example::
salt '*' bigip.list_pool bigip admin admin my-pool
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/?expandSubcollections=true'.format(name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool')
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_pool(hostname, username, password, name, members=None,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None):
'''
A function to connect to a bigip device and create a pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to create.
members
List of comma delimited pool members to add to the pool.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[enabled | disabled]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_depth_limit
[integer]
queue_on_connection_limit
[enabled | disabled]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
CLI Example::
salt '*' bigip.create_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 monitor=http
'''
params = {
'description': description,
'gateway-failsafe-device': gateway_failsafe_device,
'ignore-persisted-weight': ignore_persisted_weight,
'ip-tos-to-client': ip_tos_to_client,
'ip-tos-to-server': ip_tos_to_server,
'link-qos-to-client': link_qos_to_client,
'link-qos-to-server': link_qos_to_server,
'load-balancing-mode': load_balancing_mode,
'min-active-members': min_active_members,
'min-up-members': min_up_members,
'min-up-members-action': min_up_members_action,
'min-up-members-checking': min_up_members_checking,
'monitor': monitor,
'profiles': profiles,
'queue-on-connection-limit': queue_on_connection_limit,
'queue-depth-limit': queue_depth_limit,
'queue-time-limit': queue_time_limit,
'reselect-tries': reselect_tries,
'service-down-action': service_down_action,
'slow-ramp-time': slow_ramp_time
}
    # some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
'allow-nat': {'type': 'yes_no', 'value': allow_nat},
'allow-snat': {'type': 'yes_no', 'value': allow_snat}
}
#build payload
payload = _loop_payload(params)
payload['name'] = name
#determine toggles
payload = _determine_toggles(payload, toggles)
#specify members if provided
if members is not None:
payload['members'] = _build_list(members, 'ltm:pool:members')
#build session
bigip_session = _build_session(username, password)
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_pool(hostname, username, password, name,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None):
'''
A function to connect to a bigip device and modify an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify.
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[yes | no]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_on_connection_limit
[enabled | disabled]
queue_depth_limit
[integer]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
CLI Example::
salt '*' bigip.modify_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 min_active_members=1
'''
params = {
'description': description,
'gateway-failsafe-device': gateway_failsafe_device,
'ignore-persisted-weight': ignore_persisted_weight,
'ip-tos-to-client': ip_tos_to_client,
'ip-tos-to-server': ip_tos_to_server,
'link-qos-to-client': link_qos_to_client,
'link-qos-to-server': link_qos_to_server,
'load-balancing-mode': load_balancing_mode,
'min-active-members': min_active_members,
'min-up-members': min_up_members,
        'min-up-members-action': min_up_members_action,
'min-up-members-checking': min_up_members_checking,
'monitor': monitor,
'profiles': profiles,
'queue-on-connection-limit': queue_on_connection_limit,
'queue-depth-limit': queue_depth_limit,
'queue-time-limit': queue_time_limit,
'reselect-tries': reselect_tries,
'service-down-action': service_down_action,
'slow-ramp-time': slow_ramp_time
}
    # some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
'allow-nat': {'type': 'yes_no', 'value': allow_nat},
'allow-snat': {'type': 'yes_no', 'value': allow_snat}
}
#build payload
payload = _loop_payload(params)
payload['name'] = name
#determine toggles
payload = _determine_toggles(payload, toggles)
#build session
bigip_session = _build_session(username, password)
#post to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_pool(hostname, username, password, name):
'''
A function to connect to a bigip device and delete a specific pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool which will be deleted
CLI Example::
        salt '*' bigip.delete_pool bigip admin admin my-pool
'''
#build session
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}'.format(name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
    response_data = _load_response(response)
    if response_data['content'] == '':
        return True
    return response_data
def replace_pool_members(hostname, username, password, name, members):
'''
A function to connect to a bigip device and replace members of an existing pool with new members.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
members
List of comma delimited pool members to replace existing members with.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
CLI Example::
salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80
'''
payload = {}
payload['name'] = name
#specify members if provided
if members is not None:
if isinstance(members, six.string_types):
members = members.split(',')
pool_members = []
for member in members:
            #check to see if already a dictionary (for states)
if isinstance(member, dict):
#check for state alternative name 'member_state', replace with state
if 'member_state' in member.keys():
member['state'] = member.pop('member_state')
#replace underscore with dash
                for key in list(member):
new_key = key.replace('_', '-')
member[new_key] = member.pop(key)
pool_members.append(member)
#parse string passed via execution command (for executions)
else:
pool_members.append({'name': member, 'address': member.split(':')[0]})
payload['members'] = pool_members
#build session
bigip_session = _build_session(username, password)
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def add_pool_member(hostname, username, password, name, member):
'''
A function to connect to a bigip device and add a new member to an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
The name of the member to add
i.e. 10.1.1.2:80
CLI Example:
.. code-block:: bash
        salt '*' bigip.add_pool_member bigip admin admin my-pool 10.2.2.1:80
'''
# for states
if isinstance(member, dict):
#check for state alternative name 'member_state', replace with state
if 'member_state' in member.keys():
member['state'] = member.pop('member_state')
#replace underscore with dash
        for key in list(member):
new_key = key.replace('_', '-')
member[new_key] = member.pop(key)
payload = member
# for execution
else:
payload = {'name': member, 'address': member.split(':')[0]}
#build session
bigip_session = _build_session(username, password)
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/members'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_pool_member(hostname, username, password, name, member,
connection_limit=None,
description=None,
dynamic_ratio=None,
inherit_profile=None,
logging=None,
monitor=None,
priority_group=None,
profiles=None,
rate_limit=None,
ratio=None,
session=None,
state=None):
'''
A function to connect to a bigip device and modify an existing member of a pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
The name of the member to modify i.e. 10.1.1.2:80
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
inherit_profile
[enabled | disabled]
logging
[enabled | disabled]
monitor
[name]
priority_group
[integer]
profiles
[none | profile_name]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
state
[ user-up | user-down ]
CLI Example::
        salt '*' bigip.modify_pool_member bigip admin admin my-pool 10.2.2.1:80 state=user-down session=user-disabled
'''
params = {
'connection-limit': connection_limit,
'description': description,
'dynamic-ratio': dynamic_ratio,
'inherit-profile': inherit_profile,
'logging': logging,
'monitor': monitor,
'priority-group': priority_group,
'profiles': profiles,
'rate-limit': rate_limit,
'ratio': ratio,
'session': session,
'state': state
}
#build session
bigip_session = _build_session(username, password)
#build payload
payload = _loop_payload(params)
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/members/{member}'.format(name=name, member=member), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_pool_member(hostname, username, password, name, member):
'''
    A function to connect to a bigip device and delete a member from a specific pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
The name of the pool member to delete
CLI Example::
        salt '*' bigip.delete_pool_member bigip admin admin my-pool 10.2.2.2:80
'''
#build session
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/members/{member}'.format(name=name, member=member))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
    response_data = _load_response(response)
    if response_data['content'] == '':
        return True
    return response_data
def list_virtual(hostname, username, password, name=None):
'''
A function to connect to a bigip device and list all virtuals or a specific virtual.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
        The name of the virtual to list. If no name is specified then all
virtuals will be listed.
CLI Example::
salt '*' bigip.list_virtual bigip admin admin my-virtual
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}/?expandSubcollections=true'.format(name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual')
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_virtual(hostname, username, password, name, destination,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None):
r'''
A function to connect to a bigip device and create a virtual server.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
        [none | string]
cmp_enabled
[yes | no]
dhcp_relay
[yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward
(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[none | profile1,profile2,profile3 ... ]
profiles
[none | default | profile1,profile2,profile3 ... ]
policies
[none | default | policy1,policy2,policy3 ... ]
rate_class
[name]
rate_limit
[integer]
rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
    rate_limit_src
[integer]
rules
[none | [rule_one,rule_two ...] ]
related_rules
[none | [rule_one,rule_two ...] ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | class_one,class_two ... ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | [enabled|disabled]:vlan1,vlan2,vlan3 ... ]
CLI Examples::
salt '*' bigip.create_virtual bigip admin admin my-virtual-3 26.2.2.5:80 \
pool=my-http-pool-http profiles=http,tcp
salt '*' bigip.create_virtual bigip admin admin my-virtual-3 43.2.2.5:80 \
pool=test-http-pool-http profiles=http,websecurity persist=cookie,hash \
policies=asm_auto_l7_policy__http-virtual \
rules=_sys_APM_ExchangeSupport_helper,_sys_https_redirect \
related_rules=_sys_APM_activesync,_sys_APM_ExchangeSupport_helper \
source_address_translation=snat:my-snat-pool \
translate_address=enabled translate_port=enabled \
traffic_classes=my-class,other-class \
vlans=enabled:external,internal
'''
params = {
'pool': pool,
'auto-lasthop': auto_lasthop,
'bwc-policy': bwc_policy,
'connection-limit': connection_limit,
'description': description,
'fallback-persistence': fallback_persistence,
'flow-eviction-policy': flow_eviction_policy,
'gtm-score': gtm_score,
'ip-protocol': ip_protocol,
'last-hop-pool': last_hop_pool,
'mask': mask,
'mirror': mirror,
'nat64': nat64,
'persist': persist,
'rate-class': rate_class,
'rate-limit': rate_limit,
'rate-limit-mode': rate_limit_mode,
'rate-limit-dst': rate_limit_dst,
'rate-limit-src': rate_limit_src,
'source': source,
'source-port': source_port,
'translate-address': translate_address,
'translate-port': translate_port
}
    # some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
'address-status': {'type': 'yes_no', 'value': address_status},
'cmp-enabled': {'type': 'yes_no', 'value': cmp_enabled},
'dhcp-relay': {'type': 'true_false', 'value': dhcp_relay},
'reject': {'type': 'true_false', 'value': reject},
'12-forward': {'type': 'true_false', 'value': twelve_forward},
'internal': {'type': 'true_false', 'value': internal},
'ip-forward': {'type': 'true_false', 'value': ip_forward}
}
#build session
bigip_session = _build_session(username, password)
#build payload
payload = _loop_payload(params)
payload['name'] = name
payload['destination'] = destination
#determine toggles
payload = _determine_toggles(payload, toggles)
#specify profiles if provided
if profiles is not None:
payload['profiles'] = _build_list(profiles, 'ltm:virtual:profile')
#specify persist if provided
if persist is not None:
payload['persist'] = _build_list(persist, 'ltm:virtual:persist')
#specify policies if provided
if policies is not None:
payload['policies'] = _build_list(policies, 'ltm:virtual:policy')
#specify rules if provided
if rules is not None:
payload['rules'] = _build_list(rules, None)
#specify related-rules if provided
if related_rules is not None:
payload['related-rules'] = _build_list(related_rules, None)
#handle source-address-translation
if source_address_translation is not None:
#check to see if this is already a dictionary first
if isinstance(source_address_translation, dict):
payload['source-address-translation'] = source_address_translation
elif source_address_translation == 'none':
payload['source-address-translation'] = {'pool': 'none', 'type': 'none'}
elif source_address_translation == 'automap':
payload['source-address-translation'] = {'pool': 'none', 'type': 'automap'}
elif source_address_translation == 'lsn':
payload['source-address-translation'] = {'pool': 'none', 'type': 'lsn'}
elif source_address_translation.startswith('snat'):
snat_pool = source_address_translation.split(':')[1]
payload['source-address-translation'] = {'pool': snat_pool, 'type': 'snat'}
    #specify traffic-classes if provided
if traffic_classes is not None:
payload['traffic-classes'] = _build_list(traffic_classes, None)
#handle vlans
if vlans is not None:
        #check to see if vlans is a dictionary (used when a state makes use of this function)
if isinstance(vlans, dict):
try:
payload['vlans'] = vlans['vlan_ids']
if vlans['enabled']:
payload['vlans-enabled'] = True
elif vlans['disabled']:
payload['vlans-disabled'] = True
except Exception:
return 'Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}'.format(vlans=vlans)
elif vlans == 'none':
payload['vlans'] = 'none'
elif vlans == 'default':
payload['vlans'] = 'default'
elif isinstance(vlans, six.string_types) and (vlans.startswith('enabled') or vlans.startswith('disabled')):
try:
vlans_setting = vlans.split(':')[0]
payload['vlans'] = vlans.split(':')[1].split(',')
if vlans_setting == 'disabled':
payload['vlans-disabled'] = True
elif vlans_setting == 'enabled':
payload['vlans-enabled'] = True
except Exception:
return 'Error: Unable to Parse vlans option: \n\tvlans={vlans}'.format(vlans=vlans)
else:
return 'Error: vlans must be a dictionary or string.'
#determine state
if state is not None:
if state == 'enabled':
payload['enabled'] = True
elif state == 'disabled':
payload['disabled'] = True
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_virtual(hostname, username, password, name,
destination=None,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None):
'''
A function to connect to a bigip device and modify an existing virtual server.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to modify
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
        [none | string]
cmp_enabled
[yes | no]
dhcp_relay
        [yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward
(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[none | profile1,profile2,profile3 ... ]
profiles
[none | default | profile1,profile2,profile3 ... ]
policies
[none | default | policy1,policy2,policy3 ... ]
rate_class
[name]
rate_limit
[integer]
    rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
rate_limit_src
[integer]
rules
[none | [rule_one,rule_two ...] ]
related_rules
[none | [rule_one,rule_two ...] ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap ]
source_port
[change | preserve | preserve-strict]
state
        [enabled | disabled]
traffic_classes
[none | default | class_one,class_two ... ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | [enabled|disabled]:vlan1,vlan2,vlan3 ... ]
CLI Example::
salt '*' bigip.modify_virtual bigip admin admin my-virtual source_address_translation=none
salt '*' bigip.modify_virtual bigip admin admin my-virtual rules=my-rule,my-other-rule
'''
params = {
'destination': destination,
'pool': pool,
'auto-lasthop': auto_lasthop,
'bwc-policy': bwc_policy,
'connection-limit': connection_limit,
'description': description,
'fallback-persistence': fallback_persistence,
'flow-eviction-policy': flow_eviction_policy,
'gtm-score': gtm_score,
'ip-protocol': ip_protocol,
'last-hop-pool': last_hop_pool,
'mask': mask,
'mirror': mirror,
'nat64': nat64,
'persist': persist,
'rate-class': rate_class,
'rate-limit': rate_limit,
'rate-limit-mode': rate_limit_mode,
'rate-limit-dst': rate_limit_dst,
'rate-limit-src': rate_limit_src,
'source': source,
'source-port': source_port,
'translate-address': translate_address,
'translate-port': translate_port
}
    # some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
'address-status': {'type': 'yes_no', 'value': address_status},
'cmp-enabled': {'type': 'yes_no', 'value': cmp_enabled},
'dhcp-relay': {'type': 'true_false', 'value': dhcp_relay},
'reject': {'type': 'true_false', 'value': reject},
'12-forward': {'type': 'true_false', 'value': twelve_forward},
'internal': {'type': 'true_false', 'value': internal},
'ip-forward': {'type': 'true_false', 'value': ip_forward}
}
#build session
bigip_session = _build_session(username, password)
#build payload
payload = _loop_payload(params)
payload['name'] = name
#determine toggles
payload = _determine_toggles(payload, toggles)
#specify profiles if provided
if profiles is not None:
payload['profiles'] = _build_list(profiles, 'ltm:virtual:profile')
#specify persist if provided
if persist is not None:
payload['persist'] = _build_list(persist, 'ltm:virtual:persist')
#specify policies if provided
if policies is not None:
payload['policies'] = _build_list(policies, 'ltm:virtual:policy')
#specify rules if provided
if rules is not None:
payload['rules'] = _build_list(rules, None)
#specify related-rules if provided
if related_rules is not None:
payload['related-rules'] = _build_list(related_rules, None)
#handle source-address-translation
if source_address_translation is not None:
if source_address_translation == 'none':
payload['source-address-translation'] = {'pool': 'none', 'type': 'none'}
elif source_address_translation == 'automap':
payload['source-address-translation'] = {'pool': 'none', 'type': 'automap'}
elif source_address_translation == 'lsn':
payload['source-address-translation'] = {'pool': 'none', 'type': 'lsn'}
elif source_address_translation.startswith('snat'):
snat_pool = source_address_translation.split(':')[1]
payload['source-address-translation'] = {'pool': snat_pool, 'type': 'snat'}
    #specify traffic-classes if provided
if traffic_classes is not None:
payload['traffic-classes'] = _build_list(traffic_classes, None)
#handle vlans
if vlans is not None:
        #check to see if vlans is a dictionary (used when a state makes use of this function)
if isinstance(vlans, dict):
try:
payload['vlans'] = vlans['vlan_ids']
if vlans['enabled']:
payload['vlans-enabled'] = True
elif vlans['disabled']:
payload['vlans-disabled'] = True
except Exception:
return 'Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}'.format(vlans=vlans)
elif vlans == 'none':
payload['vlans'] = 'none'
elif vlans == 'default':
payload['vlans'] = 'default'
elif vlans.startswith('enabled') or vlans.startswith('disabled'):
try:
vlans_setting = vlans.split(':')[0]
payload['vlans'] = vlans.split(':')[1].split(',')
if vlans_setting == 'disabled':
payload['vlans-disabled'] = True
elif vlans_setting == 'enabled':
payload['vlans-enabled'] = True
except Exception:
return 'Error: Unable to Parse vlans option: \n\tvlans={vlans}'.format(vlans=vlans)
#determine state
if state is not None:
if state == 'enabled':
payload['enabled'] = True
elif state == 'disabled':
payload['disabled'] = True
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_virtual(hostname, username, password, name):
'''
A function to connect to a bigip device and delete a specific virtual.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to delete
CLI Example::
salt '*' bigip.delete_virtual bigip admin admin my-virtual
'''
#build session
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}'.format(name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
    response_data = _load_response(response)
    if response_data['content'] == '':
        return True
    return response_data
def list_monitor(hostname, username, password, monitor_type, name=None):
'''
    A function to connect to a bigip device and list an existing monitor. If no name is provided then all
monitors of the specified type will be listed.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor(s) to list
name
The name of the monitor to list
CLI Example::
salt '*' bigip.list_monitor bigip admin admin http my-http-monitor
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}?expandSubcollections=true'.format(type=monitor_type, name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}'.format(type=monitor_type))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
'''
A function to connect to a bigip device and create a monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to create
name
The name of the monitor to create
kwargs
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
CLI Example::
salt '*' bigip.create_monitor bigip admin admin http my-http-monitor timeout=10 interval=5
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
payload['name'] = name
#there's a ton of different monitors and a ton of options for each type of monitor.
    #this logic relies on the end user knowing which options are meant for which monitor types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'type']:
key = key.replace('_', '-')
payload[key] = value
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}'.format(type=monitor_type), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_monitor(hostname, username, password, monitor_type, name, **kwargs):
'''
A function to connect to a bigip device and modify an existing monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to modify
name
The name of the monitor to modify
kwargs
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
CLI Example::
        salt '*' bigip.modify_monitor bigip admin admin http my-http-monitor timeout=16 interval=6
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
#there's a ton of different monitors and a ton of options for each type of monitor.
    #this logic relies on the end user knowing which options are meant for which monitor types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'type', 'name']:
key = key.replace('_', '-')
payload[key] = value
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}'.format(type=monitor_type, name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_monitor(hostname, username, password, monitor_type, name):
'''
A function to connect to a bigip device and delete an existing monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to delete
name
The name of the monitor to delete
CLI Example::
salt '*' bigip.delete_monitor bigip admin admin http my-http-monitor
'''
#build sessions
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}'.format(type=monitor_type, name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
    response_data = _load_response(response)
    if response_data['content'] == '':
        return True
    return response_data
def list_profile(hostname, username, password, profile_type, name=None):
'''
    A function to connect to a bigip device and list an existing profile. If no name is provided then all
profiles of the specified type will be listed.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile(s) to list
name
The name of the profile to list
CLI Example::
salt '*' bigip.list_profile bigip admin admin http my-http-profile
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}?expandSubcollections=true'.format(type=profile_type, name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}'.format(type=profile_type))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_profile(hostname, username, password, profile_type, name, **kwargs):
r'''
A function to connect to a bigip device and create a profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to create
name
The name of the profile to create
kwargs
``[ arg=val ] ... [arg=key1:val1,key2:val2] ...``
        Consult F5 BIGIP user guide for specific options for each profile type.
Typically, tmsh arg names are used.
Creating Complex Args
Profiles can get pretty complicated in terms of the amount of possible
config options. Use the following shorthand to create complex arguments such
as lists, dictionaries, and lists of dictionaries. An option is also
provided to pass raw json as well.
lists ``[i,i,i]``:
``param='item1,item2,item3'``
Dictionary ``[k:v,k:v,k,v]``:
``param='key-1:val-1,key-2:val2,key-3:va-3'``
List of Dictionaries ``[k:v,k:v|k:v,k:v|k:v,k:v]``:
``param='key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2'``
JSON: ``'j{ ... }j'``:
``cert-key-chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'``
Escaping Delimiters:
Use ``\,`` or ``\:`` or ``\|`` to escape characters which shouldn't
be treated as delimiters i.e. ``ciphers='DEFAULT\:!SSLv3'``
CLI Examples::
salt '*' bigip.create_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http'
salt '*' bigip.create_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http' \
enforcement=maxHeaderCount:3200,maxRequests:10
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
payload['name'] = name
#there's a ton of different profiles and a ton of options for each type of profile.
    #this logic relies on the end user knowing which options are meant for which profile types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'profile_type']:
key = key.replace('_', '-')
try:
payload[key] = _set_value(value)
except salt.exceptions.CommandExecutionError:
return 'Error: Unable to Parse JSON data for parameter: {key}\n{value}'.format(key=key, value=value)
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}'.format(type=profile_type), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_profile(hostname, username, password, profile_type, name, **kwargs):
r'''
    A function to connect to a bigip device and modify an existing profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
    profile_type
        The type of profile to modify
    name
        The name of the profile to modify
kwargs
``[ arg=val ] ... [arg=key1:val1,key2:val2] ...``
        Consult F5 BIGIP user guide for specific options for each profile type.
Typically, tmsh arg names are used.
Creating Complex Args
Profiles can get pretty complicated in terms of the amount of possible
config options. Use the following shorthand to create complex arguments such
as lists, dictionaries, and lists of dictionaries. An option is also
provided to pass raw json as well.
lists ``[i,i,i]``:
``param='item1,item2,item3'``
Dictionary ``[k:v,k:v,k,v]``:
``param='key-1:val-1,key-2:val2,key-3:va-3'``
List of Dictionaries ``[k:v,k:v|k:v,k:v|k:v,k:v]``:
``param='key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2'``
JSON: ``'j{ ... }j'``:
``cert-key-chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'``
Escaping Delimiters:
Use ``\,`` or ``\:`` or ``\|`` to escape characters which shouldn't
be treated as delimiters i.e. ``ciphers='DEFAULT\:!SSLv3'``
CLI Examples::
salt '*' bigip.modify_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http'
salt '*' bigip.modify_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http' \
enforcement=maxHeaderCount:3200,maxRequests:10
salt '*' bigip.modify_profile bigip admin admin client-ssl my-client-ssl-1 retainCertificate=false \
        ciphers='DEFAULT\:!SSLv3' \
cert_key_chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
payload['name'] = name
#there's a ton of different profiles and a ton of options for each type of profile.
    #this logic relies on the end user knowing which options are meant for which profile types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'profile_type']:
key = key.replace('_', '-')
try:
payload[key] = _set_value(value)
except salt.exceptions.CommandExecutionError:
return 'Error: Unable to Parse JSON data for parameter: {key}\n{value}'.format(key=key, value=value)
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}'.format(type=profile_type, name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_profile(hostname, username, password, profile_type, name):
'''
A function to connect to a bigip device and delete an existing profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to delete
name
The name of the profile to delete
CLI Example::
salt '*' bigip.delete_profile bigip admin admin http my-http-profile
'''
#build sessions
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}'.format(type=profile_type, name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
if _load_response(response) == '':
return True
else:
return _load_response(response)
avg_line_length: 31.59692 | max_line_length: 171 | alphanum_fraction: 0.618353
hexsha: 12fb7e00f96ab5db12948a35295803f3ad109c79 | size: 1,676 | ext: py | lang: Python
max_stars_repo: koreto/rndm.py | xvdp/koreto | 70f683aeec5e43a15549d447b8f540fa4c5fde4f | ["MIT"] | max_stars_count: null | stars_event: null .. null
max_issues_repo: koreto/rndm.py | xvdp/koreto | 70f683aeec5e43a15549d447b8f540fa4c5fde4f | ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: koreto/rndm.py | xvdp/koreto | 70f683aeec5e43a15549d447b8f540fa4c5fde4f | ["MIT"] | max_forks_count: null | forks_event: null .. null
""" random utilities """
import numpy as np
from koreto import WITH_TORCH
if WITH_TORCH:
import torch
# pylint: disable=no-member
def unique_randint(low, high, size, overflow=1.2, out_type="torch"):
""" returns a unique set of random ints
Args
low (int)
high (int)
size (int) < high - low
overflow (float [1.2]) > 1
out_type (str ["torch"]) | "numpy"
"""
assert size < high - low, "size needs to be smaller than range"
assert overflow > 1
if not WITH_TORCH:
out_type = "numpy"
if out_type[0] == "n":
return _np_unique_randint(low, high, size, overflow=1.2)
samples = torch.unique(torch.randint(low, high, (int(size*overflow),)))
num_samples = len(samples)
if num_samples < size:
return unique_randint(low, high, size, overflow*1.5)
excess = num_samples - size
if not excess:
return samples
_i = torch.randint(0, size-excess, (1,))
return torch.cat([samples[0:_i], samples[_i +excess:]])
def _np_unique_randint(low, high, size, overflow=1.2):
""" returns a unique set of random ints
Args
low (int)
high (int)
size (int) < high - low
overflow (float [1.2]) > 1
"""
samples = np.unique(np.random.randint(low, high, int(size*overflow)))
num_samples = len(samples)
if num_samples < size:
return _np_unique_randint(low, high, size, overflow*1.5)
excess = num_samples - size
if not excess:
return samples
i = np.random.randint(0, size-excess)
return np.concatenate([samples[0:i], samples[i+excess:]])
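# Illustrative usage sketch (not part of the original module): shows the expected
# behaviour of the oversample-then-deduplicate helpers above. Assumes numpy is
# available; the example values are hypothetical.
if __name__ == "__main__":
    picks = unique_randint(0, 100, 10, out_type="numpy")
    assert len(picks) == 10
    assert len(np.unique(picks)) == 10  # all returned values are distinct
    print(picks)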
avg_line_length: 29.403509 | max_line_length: 75 | alphanum_fraction: 0.597852
hexsha: 5a1de7d4d772805ff3d96f4283f18971d5f2aed6 | size: 4,024 | ext: py | lang: Python
max_stars_repo: goose/utils/__init__.py | chenjue/python-goose | 840ced108731a287422bc4383df69737753a1cdd | ["Apache-2.0"] | max_stars_count: 3,052 | stars_event: 2015-01-01T09:03:06.000Z .. 2022-03-30T17:47:30.000Z
max_issues_repo: goose/utils/__init__.py | 17702296834/python-goose | 09023ec9f5ef26a628a2365616c0a7c864f0ecea | ["Apache-2.0"] | max_issues_count: 87 | issues_event: 2015-01-01T02:55:22.000Z .. 2021-11-19T01:52:48.000Z
max_forks_repo: goose/utils/__init__.py | 17702296834/python-goose | 09023ec9f5ef26a628a2365616c0a7c864f0ecea | ["Apache-2.0"] | max_forks_count: 765 | forks_event: 2015-01-02T13:45:53.000Z .. 2022-02-12T16:20:44.000Z
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import hashlib
import re
import os
import goose
import codecs
from urlparse import urlparse
class BuildURL(object):
def __init__(self, url, finalurl=None):
self.url = url
self.finalurl = finalurl
def getHostname(self, o):
if o.hostname:
return o.hostname
elif self.finalurl:
oo = urlparse(self.finalurl)
if oo.hostname:
return oo.hostname
return None
def getScheme(self, o):
if o.scheme:
return o.scheme
elif self.finalurl:
oo = urlparse(self.finalurl)
if oo.scheme:
return oo.scheme
return 'http'
def getUrl(self):
"""\
"""
url_obj = urlparse(self.url)
scheme = self.getScheme(url_obj)
hostname = self.getHostname(url_obj)
class FileHelper(object):
@classmethod
def loadResourceFile(self, filename):
if not os.path.isabs(filename):
dirpath = os.path.dirname(goose.__file__)
path = os.path.join(dirpath, 'resources', filename)
else:
path = filename
try:
f = codecs.open(path, 'r', 'utf-8')
content = f.read()
f.close()
return content
except IOError:
raise IOError("Couldn't open file %s" % path)
class ParsingCandidate(object):
def __init__(self, urlString, link_hash):
self.urlString = self.url = urlString
self.link_hash = link_hash
class RawHelper(object):
@classmethod
def get_parsing_candidate(self, url, raw_html):
if isinstance(raw_html, unicode):
raw_html = raw_html.encode('utf-8')
link_hash = '%s.%s' % (hashlib.md5(raw_html).hexdigest(), time.time())
return ParsingCandidate(url, link_hash)
class URLHelper(object):
@classmethod
def get_parsing_candidate(self, url_to_crawl):
# replace shebang is urls
final_url = url_to_crawl.replace('#!', '?_escaped_fragment_=') \
if '#!' in url_to_crawl else url_to_crawl
link_hash = '%s.%s' % (hashlib.md5(final_url).hexdigest(), time.time())
return ParsingCandidate(final_url, link_hash)
class StringReplacement(object):
def __init__(self, pattern, replaceWith):
self.pattern = pattern
self.replaceWith = replaceWith
def replaceAll(self, string):
if not string:
return u''
return string.replace(self.pattern, self.replaceWith)
class ReplaceSequence(object):
def __init__(self):
self.replacements = []
#@classmethod
def create(self, firstPattern, replaceWith=None):
result = StringReplacement(firstPattern, replaceWith or u'')
self.replacements.append(result)
return self
def append(self, pattern, replaceWith=None):
return self.create(pattern, replaceWith)
def replaceAll(self, string):
if not string:
return u''
mutatedString = string
for rp in self.replacements:
mutatedString = rp.replaceAll(mutatedString)
return mutatedString
avg_line_length: 28.13986 | max_line_length: 79 | alphanum_fraction: 0.645129
hexsha: 1b035a028b650810306046d4feb72ad53ced2070 | size: 2,923 | ext: py | lang: Python
max_stars_repo: src/python/pants/util/eval.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | ["Apache-2.0"] | max_stars_count: 94 | stars_event: 2015-01-15T21:24:20.000Z .. 2022-02-16T16:55:43.000Z
max_issues_repo: src/python/pants/util/eval.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | ["Apache-2.0"] | max_issues_count: 5 | issues_event: 2020-07-18T01:04:43.000Z .. 2021-05-10T08:40:56.000Z
max_forks_repo: src/python/pants/util/eval.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | ["Apache-2.0"] | max_forks_count: 47 | forks_event: 2015-02-25T02:20:07.000Z .. 2022-03-21T00:59:16.000Z
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import range, str
from textwrap import dedent
from future.utils import string_types
def parse_expression(val, acceptable_types, name=None, raise_type=ValueError):
"""Attempts to parse the given `val` as a python expression of the specified `acceptable_types`.
:param string val: A string containing a python expression.
:param acceptable_types: The acceptable types of the parsed object.
:type acceptable_types: type|tuple of types. The tuple may be nested; ie anything `isinstance`
accepts.
:param string name: An optional logical name for the value being parsed; ie if the literal val
represents a person's age, 'age'.
:param type raise_type: The type of exception to raise for all failures; ValueError by default.
:raises: If `val` is not a valid python literal expression or it is but evaluates to an object
that is not a an instance of one of the `acceptable_types`.
"""
def format_type(typ):
return typ.__name__
if not isinstance(val, string_types):
raise raise_type('The raw `val` is not a string. Given {} of type {}.'
.format(val, format_type(type(val))))
def get_name():
return repr(name) if name else 'value'
def format_raw_value():
lines = val.splitlines()
for line_number in range(0, len(lines)):
lines[line_number] = "{line_number:{width}}: {line}".format(
line_number=line_number + 1,
line=lines[line_number],
width=len(str(len(lines))))
return '\n'.join(lines)
try:
parsed_value = eval(val)
except Exception as e:
raise raise_type(dedent("""\
The {name} cannot be evaluated as a literal expression: {error}
Given raw value:
{value}
""".format(name=get_name(),
error=e,
value=format_raw_value())))
if not isinstance(parsed_value, acceptable_types):
def iter_types(types):
if isinstance(types, type):
yield types
elif isinstance(types, tuple):
for item in types:
for typ in iter_types(item):
yield typ
else:
raise ValueError('The given acceptable_types is not a valid type (tuple): {}'
.format(acceptable_types))
raise raise_type(dedent("""\
The {name} is not of the expected type(s): {types}:
Given the following raw value that evaluated to type {type}:
{value}
""".format(name=get_name(),
types=', '.join(format_type(t) for t in iter_types(acceptable_types)),
type=format_type(type(parsed_value)),
value=format_raw_value())))
return parsed_value
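# Illustrative usage sketch (not part of the original module), showing the intended
# contract of parse_expression: the string is eval'd and the result is type-checked
# against acceptable_types. The literals below are hypothetical.
if __name__ == '__main__':
    # A dict literal parsed with an acceptable type of dict succeeds.
    opts = parse_expression("{'a': 1, 'b': 2}", acceptable_types=dict, name='opts')
    assert opts == {'a': 1, 'b': 2}
    # A mismatched type raises the configured exception (ValueError by default).
    try:
        parse_expression("[1, 2, 3]", acceptable_types=dict, name='opts')
    except ValueError:
        pass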
avg_line_length: 37.961039 | max_line_length: 98 | alphanum_fraction: 0.657886
hexsha: 74a6c399649d26d5f4053baefbffd70b0bf38fa9 | size: 2,834 | ext: py | lang: Python
max_stars_repo: pythonProject1/venv/Lib/site-packages/json_api_doc/serialization.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | ["CC0-1.0"] | max_stars_count: 23 | stars_event: 2019-01-11T10:14:39.000Z .. 2021-11-12T12:03:49.000Z
max_issues_repo: pythonProject1/venv/Lib/site-packages/json_api_doc/serialization.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | ["CC0-1.0"] | max_issues_count: 22 | issues_event: 2018-05-16T20:50:42.000Z .. 2020-12-04T01:22:54.000Z
max_forks_repo: pythonProject1/venv/Lib/site-packages/json_api_doc/serialization.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | ["CC0-1.0"] | max_forks_count: 12 | forks_event: 2019-01-05T17:12:40.000Z .. 2020-11-30T14:54:28.000Z
# -*- coding: utf-8 -*-
from collections import OrderedDict
def serialize(data={}, errors={}, meta={}, links={}):
"""
:param data: Dict with data to serialize
:param errors: Dict with error data to serialize
:param meta: Dict with meta data to serialize
:returns: Dict normalized as a valid JSON API document
"""
if data and errors:
raise AttributeError("""Only 'data' or 'errors' can be present in a
valid JSON API document""")
included = OrderedDict()
res = {}
if data:
if isinstance(data, list):
res["data"] = list(
map(lambda item: _serialize(item, included), data))
else:
res["data"] = _serialize(data, included)
elif isinstance(data, list):
res["data"] = []
if included:
res["included"] = list(included.values())
if meta:
res["meta"] = meta
if errors:
res["errors"] = errors
if links:
res["links"] = links
return res or {"data": None}
def _serialize(data, included):
obj_type = data.get("$type", None)
if obj_type is None:
raise AttributeError("Missing object $type")
res = _expand(data, included)
res["type"] = obj_type
obj_id = data.get("id", None)
if obj_id is not None:
res["id"] = obj_id
return res
def _expand(data, included):
res = {}
attrs = {}
rels = {}
for k, v in data.items():
if k in ["$type", "id"]:
continue
if isinstance(v, dict):
embedded, is_res = _expand_included(v, included)
if is_res:
rels[k] = {
"data": embedded
}
else:
attrs[k] = embedded
elif isinstance(v, list):
embedded = list(map(lambda l: _expand_included(l, included), v))
if all(map(lambda i: i[1], embedded)):
rels[k] = {
"data": list(map(lambda i: i[0], embedded))
}
else:
attrs[k] = list(map(lambda i: i[0], embedded))
else:
attrs[k] = v
if len(attrs):
res["attributes"] = attrs
if len(rels):
res["relationships"] = rels
return res
def _expand_included(data, included):
if not isinstance(data, dict):
return data, False
typ = data.get("$type", None)
id = data.get("id", None)
if typ is None or id is None:
# not a sub-resource, return as is
return data, False
if typ is not None and id is not None and (typ, id) not in included:
serialized = _expand(data, included)
serialized["type"] = typ
serialized["id"] = id
included[(typ, id)] = serialized
return {"type": typ, "id": id}, True
avg_line_length: 25.079646 | max_line_length: 76 | alphanum_fraction: 0.529287
hexsha: 09f749cba566314c7bca117724db1325a857c58e | size: 443 | ext: py | lang: Python
max_stars_repo: SNE_lab/config.py | LplusKira/SNE_lab | dd51d5bb8e18244605a9e25f26264fc0ff743c00 | ["MIT"] | max_stars_count: 2 | stars_event: 2019-02-21T14:53:34.000Z .. 2020-04-02T07:13:04.000Z
max_issues_repo: SNE_lab/config.py | LplusKira/SNE_lab | dd51d5bb8e18244605a9e25f26264fc0ff743c00 | ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: SNE_lab/config.py | LplusKira/SNE_lab | dd51d5bb8e18244605a9e25f26264fc0ff743c00 | ["MIT"] | max_forks_count: null | forks_event: null .. null
from os import environ
import logging
# For logging
DEBUG2LOG_LEVEL = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
DEBUG = environ.get('DEBUG') # Logging level
LOG_LEVEL = DEBUG2LOG_LEVEL.get(DEBUG, DEBUG2LOG_LEVEL['INFO'])
# For test (indep from logging level)
TEST_SNE = bool(environ.get('TEST_SNE'))
TEST_DIR = 'report/.testsne/'
avg_line_length: 24.611111 | max_line_length: 63 | alphanum_fraction: 0.704289
hexsha: 57d29f34a45a92774203f5e23f2e7422c6249f3d | size: 962 | ext: py | lang: Python
max_stars_repo: book_center/book_center/bc_contact/forms.py | geodimitrov/Python-Web-Framework-SoftUni | 06b7e11aee0024a564d1b266d5ed6271351ac116 | ["MIT"] | max_stars_count: null | stars_event: null .. null
max_issues_repo: book_center/book_center/bc_contact/forms.py | geodimitrov/Python-Web-Framework-SoftUni | 06b7e11aee0024a564d1b266d5ed6271351ac116 | ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: book_center/book_center/bc_contact/forms.py | geodimitrov/Python-Web-Framework-SoftUni | 06b7e11aee0024a564d1b266d5ed6271351ac116 | ["MIT"] | max_forks_count: null | forks_event: null .. null
from book_center.bc_contact.models import BookCenterContactFormModel
from book_center.utils.validators import validate_bot_catcher_empty
from book_center.utils.mixins import NoLabelFormMixin
from django import forms
class ContactForm(NoLabelFormMixin, forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._init_bootstrap()
class Meta:
model = BookCenterContactFormModel
exclude = ('reply',)
widgets = {
'subject': forms.TextInput(attrs={'placeholder': 'Subject'}),
'email': forms.EmailInput(attrs={'placeholder': 'Email'}),
'message': forms.Textarea(attrs={'placeholder': 'Tell us what\'s on your mind', 'rows': 6})
}
bots_catcher = forms.CharField(
widget=forms.HiddenInput(),
required=False,
)
def clean_bots_catcher(self):
validate_bot_catcher_empty(self.cleaned_data['bots_catcher'])
avg_line_length: 34.357143 | max_line_length: 103 | alphanum_fraction: 0.678794
hexsha: c5d28e372b6292e7b2ead24bf808ded021689a57 | size: 858 | ext: py | lang: Python
max_stars_repo: jhu_primitives/core/JHUEvaluator.py | jagterberg/primitives-interfaces | b5d31999593baa926a4fbd5d3bdd752d8c468e6a | ["Apache-2.0"] | max_stars_count: null | stars_event: null .. null
max_issues_repo: jhu_primitives/core/JHUEvaluator.py | jagterberg/primitives-interfaces | b5d31999593baa926a4fbd5d3bdd752d8c468e6a | ["Apache-2.0"] | max_issues_count: 10 | issues_event: 2017-09-12T05:45:43.000Z .. 2017-09-18T15:26:43.000Z
max_forks_repo: jhu_primitives/core/JHUEvaluator.py | youngser/D3M | a9998ca12644264d61e8ce5258a54f25b5f9f726 | ["Apache-2.0"] | max_forks_count: null | forks_event: null .. null
#!/usr/bin/env python
# JHUEvaluator.py
from sklearn.metrics import adjusted_rand_score
from Evaluator import Evaluator
class JHUEvaluator(Evaluator):
def __init__(self):
pass
def get_accuracy(self, **kwargs):
"""
Use ARI to evaluate our procedure
** Keyword Arguments **:
predicted_labels:
- The predicted labels from your model
true_labels:
- The true known labels from your model
"""
if "predicted_labels" in kwargs and "true_labels" in kwargs:
return 100*(adjusted_rand_score(kwargs["predicted_labels"],
kwargs["true_labels"]))
else:
return 0
def test():
ev = JHUEvaluator()
print("Ev: ", ev.get_accuracy(predicted_labels=[1,2,3,4,5],
true_labels=[5,4,3,2,1]))
# test() # Not run
avg_line_length: 23.189189 | max_line_length: 71 | alphanum_fraction: 0.60373
hexsha: 0dbb97a36009acf8010021e28956dabffa12debf | size: 11,580 | ext: py | lang: Python
max_stars_repo: yt_dlp/extractor/twitcasting.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | ["Unlicense"] | max_stars_count: 11 | stars_event: 2022-01-06T22:09:50.000Z .. 2022-03-12T22:26:22.000Z
max_issues_repo: yt_dlp/extractor/twitcasting.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | ["Unlicense"] | max_issues_count: 4 | issues_event: 2022-02-25T08:20:18.000Z .. 2022-03-17T16:16:20.000Z
max_forks_repo: yt_dlp/extractor/twitcasting.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | ["Unlicense"] | max_forks_count: 3 | forks_event: 2022-02-19T08:59:13.000Z .. 2022-03-06T16:11:21.000Z
import itertools
import re
from .common import InfoExtractor
from ..dependencies import websockets
from ..utils import (
clean_html,
ExtractorError,
float_or_none,
get_element_by_class,
get_element_by_id,
parse_duration,
qualities,
str_to_int,
traverse_obj,
try_get,
unified_timestamp,
urlencode_postdata,
urljoin,
)
class TwitCastingIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<uploader_id>[^/]+)/(?:movie|twplayer)/(?P<id>\d+)'
_M3U8_HEADERS = {
'Origin': 'https://twitcasting.tv',
'Referer': 'https://twitcasting.tv/',
}
_TESTS = [{
'url': 'https://twitcasting.tv/ivetesangalo/movie/2357609',
'md5': '745243cad58c4681dc752490f7540d7f',
'info_dict': {
'id': '2357609',
'ext': 'mp4',
'title': 'Live #2357609',
'uploader_id': 'ivetesangalo',
'description': 'Twitter Oficial da cantora brasileira Ivete Sangalo.',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20110822',
'timestamp': 1314010824,
'duration': 32,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://twitcasting.tv/mttbernardini/movie/3689740',
'info_dict': {
'id': '3689740',
'ext': 'mp4',
'title': 'Live playing something #3689740',
'uploader_id': 'mttbernardini',
'description': 'Salve, io sono Matto (ma con la e). Questa è la mia presentazione, in quanto sono letteralmente matto (nel senso di strano), con qualcosa in più.',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20120212',
'timestamp': 1329028024,
'duration': 681,
'view_count': int,
},
'params': {
'skip_download': True,
'videopassword': 'abc',
},
}, {
'note': 'archive is split in 2 parts',
'url': 'https://twitcasting.tv/loft_heaven/movie/685979292',
'info_dict': {
'id': '685979292',
'ext': 'mp4',
'title': '南波一海のhear_here “ナタリー望月哲さんに聞く編集と「渋谷系狂騒曲」”',
'duration': 6964.599334,
},
'playlist_mincount': 2,
}]
def _real_extract(self, url):
uploader_id, video_id = self._match_valid_url(url).groups()
video_password = self.get_param('videopassword')
request_data = None
if video_password:
request_data = urlencode_postdata({
'password': video_password,
}, encoding='utf-8')
webpage, urlh = self._download_webpage_handle(
url, video_id, data=request_data,
headers={'Origin': 'https://twitcasting.tv'})
if urlh.geturl() != url and request_data:
webpage = self._download_webpage(
urlh.geturl(), video_id, data=request_data,
headers={'Origin': 'https://twitcasting.tv'},
note='Retrying authentication')
# has to check here as the first request can contain password input form even if the password is correct
if re.search(r'<form\s+method="POST">\s*<input\s+[^>]+?name="password"', webpage):
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
title = (clean_html(get_element_by_id('movietitle', webpage))
or self._html_search_meta(['og:title', 'twitter:title'], webpage, fatal=True))
video_js_data = try_get(
webpage,
lambda x: self._parse_json(self._search_regex(
r'data-movie-playlist=\'([^\']+?)\'',
x, 'movie playlist', default=None), video_id)['2'], list)
thumbnail = traverse_obj(video_js_data, (0, 'thumbnailUrl')) or self._og_search_thumbnail(webpage)
description = clean_html(get_element_by_id(
'authorcomment', webpage)) or self._html_search_meta(
['description', 'og:description', 'twitter:description'], webpage)
duration = (try_get(video_js_data, lambda x: sum(float_or_none(y.get('duration')) for y in x) / 1000)
or parse_duration(clean_html(get_element_by_class('tw-player-duration-time', webpage))))
view_count = str_to_int(self._search_regex(
(r'Total\s*:\s*([\d,]+)\s*Views', r'総視聴者\s*:\s*([\d,]+)\s*</'), webpage, 'views', None))
timestamp = unified_timestamp(self._search_regex(
r'data-toggle="true"[^>]+datetime="([^"]+)"',
webpage, 'datetime', None))
stream_server_data = self._download_json(
'https://twitcasting.tv/streamserver.php?target=%s&mode=client' % uploader_id, video_id,
'Downloading live info', fatal=False)
is_live = 'data-status="online"' in webpage
if not traverse_obj(stream_server_data, 'llfmp4') and is_live:
self.raise_login_required(method='cookies')
base_dict = {
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader_id': uploader_id,
'duration': duration,
'view_count': view_count,
'is_live': is_live,
}
def find_dmu(x):
data_movie_url = self._search_regex(
r'data-movie-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
x, 'm3u8 url', group='url', default=None)
if data_movie_url:
return [data_movie_url]
m3u8_urls = (try_get(webpage, find_dmu, list)
or traverse_obj(video_js_data, (..., 'source', 'url'))
or ([f'https://twitcasting.tv/{uploader_id}/metastream.m3u8'] if is_live else None))
if not m3u8_urls:
raise ExtractorError('Failed to get m3u8 playlist')
if is_live:
m3u8_url = m3u8_urls[0]
formats = self._extract_m3u8_formats(
m3u8_url, video_id, ext='mp4', m3u8_id='hls',
live=True, headers=self._M3U8_HEADERS)
if traverse_obj(stream_server_data, ('hls', 'source')):
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, ext='mp4', m3u8_id='source',
live=True, query={'mode': 'source'},
note='Downloading source quality m3u8',
headers=self._M3U8_HEADERS, fatal=False))
if websockets:
qq = qualities(['base', 'mobilesource', 'main'])
streams = traverse_obj(stream_server_data, ('llfmp4', 'streams')) or {}
for mode, ws_url in streams.items():
formats.append({
'url': ws_url,
'format_id': 'ws-%s' % mode,
'ext': 'mp4',
'quality': qq(mode),
'source_preference': -10,
# TwitCasting simply sends moof atom directly over WS
'protocol': 'websocket_frag',
})
self._sort_formats(formats, ('source',))
infodict = {
'formats': formats
}
elif len(m3u8_urls) == 1:
formats = self._extract_m3u8_formats(
m3u8_urls[0], video_id, 'mp4', headers=self._M3U8_HEADERS)
self._sort_formats(formats)
infodict = {
# No problem here since there's only one manifest
'formats': formats,
'http_headers': self._M3U8_HEADERS,
}
else:
infodict = {
'_type': 'multi_video',
'entries': [{
'id': f'{video_id}-{num}',
'url': m3u8_url,
'ext': 'mp4',
# Requesting the manifests here will cause download to fail.
# So use ffmpeg instead. See: https://github.com/yt-dlp/yt-dlp/issues/382
'protocol': 'm3u8',
'http_headers': self._M3U8_HEADERS,
**base_dict,
} for (num, m3u8_url) in enumerate(m3u8_urls)],
}
return {
'id': video_id,
**base_dict,
**infodict,
}
class TwitCastingLiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<id>[^/]+)/?(?:[#?]|$)'
_TESTS = [{
'url': 'https://twitcasting.tv/ivetesangalo',
'only_matching': True,
}]
def _real_extract(self, url):
uploader_id = self._match_id(url)
self.to_screen(
'Downloading live video of user {0}. '
'Pass "https://twitcasting.tv/{0}/show" to download the history'.format(uploader_id))
webpage = self._download_webpage(url, uploader_id)
current_live = self._search_regex(
(r'data-type="movie" data-id="(\d+)">',
r'tw-sound-flag-open-link" data-id="(\d+)" style=',),
webpage, 'current live ID', default=None)
if not current_live:
# fetch unfiltered /show to find running livestreams; we can't get ID of the password-protected livestream above
webpage = self._download_webpage(
f'https://twitcasting.tv/{uploader_id}/show/', uploader_id,
note='Downloading live history')
is_live = self._search_regex(r'(?s)(<span\s*class="tw-movie-thumbnail-badge"\s*data-status="live">\s*LIVE)', webpage, 'is live?', default=None)
if is_live:
# get the first live; running live is always at the first
current_live = self._search_regex(
r'(?s)<a\s+class="tw-movie-thumbnail"\s*href="/[^/]+/movie/(?P<video_id>\d+)"\s*>.+?</a>',
webpage, 'current live ID 2', default=None, group='video_id')
if not current_live:
raise ExtractorError('The user is not currently live')
return self.url_result('https://twitcasting.tv/%s/movie/%s' % (uploader_id, current_live))
class TwitCastingUserIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<id>[^/]+)/show/?(?:[#?]|$)'
_TESTS = [{
'url': 'https://twitcasting.tv/noriyukicas/show',
'only_matching': True,
}]
def _entries(self, uploader_id):
base_url = next_url = 'https://twitcasting.tv/%s/show' % uploader_id
for page_num in itertools.count(1):
webpage = self._download_webpage(
next_url, uploader_id, query={'filter': 'watchable'}, note='Downloading page %d' % page_num)
matches = re.finditer(
r'''(?isx)<a\s+class="tw-movie-thumbnail"\s*href="(?P<url>/[^/]+/movie/\d+)"\s*>.+?</a>''',
webpage)
for mobj in matches:
yield self.url_result(urljoin(base_url, mobj.group('url')))
next_url = self._search_regex(
r'<a href="(/%s/show/%d-\d+)[?"]' % (re.escape(uploader_id), page_num),
webpage, 'next url', default=None)
next_url = urljoin(base_url, next_url)
if not next_url:
return
def _real_extract(self, url):
uploader_id = self._match_id(url)
return self.playlist_result(
self._entries(uploader_id), uploader_id, '%s - Live History' % uploader_id)
avg_line_length: 41.805054 | max_line_length: 175 | alphanum_fraction: 0.545941
hexsha: 92a6f4e670e05251b47413f65819bdaa9f7caaa4 | size: 399 | ext: py | lang: Python
max_stars_repo: fsapi/netRemote_sys_audio.py | jentz1986/shng-undok-plugin | c684a3c2d3f5e747bfc4df0000fb445a74b47334 | ["MIT"] | max_stars_count: null | stars_event: null .. null
max_issues_repo: fsapi/netRemote_sys_audio.py | jentz1986/shng-undok-plugin | c684a3c2d3f5e747bfc4df0000fb445a74b47334 | ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: fsapi/netRemote_sys_audio.py | jentz1986/shng-undok-plugin | c684a3c2d3f5e747bfc4df0000fb445a74b47334 | ["MIT"] | max_forks_count: null | forks_event: null .. null
from .fsapi_exception import *
from .fsapi_node import *
@FSAPI_Node('bool', 'mute')
class FSAPI_Node_Mute(object):
key = 'netRemote.sys.audio.mute'
max_age = 5
get_url = "GET/{}"
set_url = "SET/{}"
@FSAPI_Node('u8', 'volume')
class FSAPI_Node_Volume(object):
key = 'netRemote.sys.audio.volume'
max_age = 5
get_url = "GET/{}"
set_url = "SET/{}"
avg_line_length: 21 | max_line_length: 39 | alphanum_fraction: 0.60401
hexsha: 0b7d5fe62ce497f7d92855e7298e6a3e8731ffc1 | size: 3,239 | ext: py | lang: Python
max_stars_repo: repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py | tmds/leapp-repository | 7c9ea115a68530eb25f5c23d3fcadd60c501bf78 | ["Apache-2.0"] | max_stars_count: null | stars_event: null .. null
max_issues_repo: repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py | tmds/leapp-repository | 7c9ea115a68530eb25f5c23d3fcadd60c501bf78 | ["Apache-2.0"] | max_issues_count: 1 | issues_event: 2022-03-07T15:34:11.000Z .. 2022-03-07T15:35:15.000Z
max_forks_repo: repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py | tmds/leapp-repository | 7c9ea115a68530eb25f5c23d3fcadd60c501bf78 | ["Apache-2.0"] | max_forks_count: null | forks_event: null .. null
import re
from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.stdlib import api, run
from leapp.models import YumConfig
# When the output spans multiple lines, each of the lines after the first one
# start with a ' <SPACES> : '
YUM_LOADED_PLUGINS_NEXT_LINE_START = ' +: '
def _parse_loaded_plugins(yum_output):
"""
Retrieves a list of plugins that are being loaded when calling yum.
:param dict yum_output: The result of running the yum command.
:rtype: list
:returns: A list of plugins that are being loaded when calling yum.
"""
# YUM might break the information about loaded plugins into multiple lines,
# we need to concaternate the list ourselves
loaded_plugins_str = ''
for line in yum_output['stdout']:
if line.startswith('Loaded plugins:'):
# We have found the first line that contains the plugins
plugins_on_this_line = line[16:] # Remove the `Loaded plugins: ` part
if plugins_on_this_line[-1] == ',':
plugins_on_this_line += ' '
loaded_plugins_str += plugins_on_this_line
continue
if loaded_plugins_str:
if re.match(YUM_LOADED_PLUGINS_NEXT_LINE_START, line):
# The list of plugins continues on this line
plugins_on_this_line = line.lstrip(' :') # Remove the leading spaces and semicolon
# Plugins are separated by ', ', however the space at the end of line might get dropped, add it
# so we can split it by ', ' later
if plugins_on_this_line[-1] == ',':
plugins_on_this_line += ' '
loaded_plugins_str += plugins_on_this_line
else:
# The list of loaded plugins ended
break
return loaded_plugins_str.split(', ')
def scan_enabled_yum_plugins():
"""
Runs the `yum` command and parses its output for enabled/loaded plugins.
:return: A list of enabled plugins.
:rtype: List
"""
# We rely on yum itself to report what plugins are used when it is invoked.
# An alternative approach would be to check /usr/lib/yum-plugins/ (install
# path for yum plugins) and parse corresponding configurations from
# /etc/yum/pluginconf.d/
if get_source_major_version() == '7':
# in case of yum, set debuglevel=2 to be sure the output is always
# same. The format of data is different for various debuglevels
yum_cmd = ['yum', '--setopt=debuglevel=2']
else:
# the verbose mode in dnf always set particular debuglevel, so the
# output is not affected by the default debug level set on the
# system
yum_cmd = ['dnf', '-v'] # On RHEL8 we need to supply an extra switch
yum_output = run(yum_cmd, split=True, checked=False) # The yum command will certainly fail (does not matter).
return _parse_loaded_plugins(yum_output)
def scan_yum_config():
"""
Scans the YUM configuration and produces :class:`YumConfig` message with the information found.
"""
config = YumConfig()
config.enabled_plugins = scan_enabled_yum_plugins()
api.produce(config)
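# Illustrative sketch (not part of the original module): feeding a captured yum output
# dict straight into _parse_loaded_plugins shows how a plugin list wrapped across two
# lines is stitched back together. The sample output below is made up for illustration.
if __name__ == '__main__':
    fake_yum_output = {
        'stdout': [
            'Loaded plugins: fastestmirror, langpacks,',
            '              : product-id, search-disabled-repos',
            'No packages marked for update',
        ]
    }
    print(_parse_loaded_plugins(fake_yum_output))
    # expected: ['fastestmirror', 'langpacks', 'product-id', 'search-disabled-repos']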
avg_line_length: 36.806818 | max_line_length: 114 | alphanum_fraction: 0.656993
hexsha: 2abc6896348156a7f4f26309b66b60487b9f0ce8 | size: 12,120 | ext: py | lang: Python
max_stars_repo: pinet/CurveLanes/util_hourglass.py | ybarancan/STSU | 0b9efa88739c517a7ca00e61faefa4b45714d312 | ["Apache-2.0"] | max_stars_count: 67 | stars_event: 2021-10-06T13:48:44.000Z .. 2022-03-31T04:00:20.000Z
max_issues_repo: pinet/CurveLanes/util_hourglass.py | w23215/STSU | 0b9efa88739c517a7ca00e61faefa4b45714d312 | ["Apache-2.0"] | max_issues_count: 6 | issues_event: 2021-11-03T08:37:31.000Z .. 2022-03-19T10:13:10.000Z
max_forks_repo: pinet/CurveLanes/util_hourglass.py | w23215/STSU | 0b9efa88739c517a7ca00e61faefa4b45714d312 | ["Apache-2.0"] | max_forks_count: 10 | forks_event: 2021-12-08T10:54:45.000Z .. 2022-03-22T06:44:14.000Z
#########################################################################
##
## Some utility for training, data processing, and network.
##
#########################################################################
import torch
import torch.nn as nn
from pinet.CurveLanes.parameters import Parameters
p = Parameters()
def backward_hook(self, grad_input, grad_output):
print('grad_input norm:', grad_input[0].data.norm())
######################################################################
##
## Convolution layer modules
##
######################################################################
class Conv2D_BatchNorm_Relu(nn.Module):
def __init__(self, in_channels, n_filters, k_size, padding, stride, bias=True, acti=True, dilation=1):
super(Conv2D_BatchNorm_Relu, self).__init__()
if acti:
self.cbr_unit = nn.Sequential(nn.Conv2d(in_channels, n_filters, k_size,
padding=padding, stride=stride, bias=bias, dilation=dilation),
nn.BatchNorm2d(n_filters),
#nn.ReLU(inplace=True),)
nn.PReLU(),)
else:
self.cbr_unit = nn.Conv2d(in_channels, n_filters, k_size, padding=padding, stride=stride, bias=bias, dilation=dilation)
def forward(self, inputs):
outputs = self.cbr_unit(inputs)
return outputs
class bottleneck(nn.Module):
def __init__(self, in_channels, out_channels, acti=True):
super(bottleneck, self).__init__()
self.acti = acti
temp_channels = in_channels//4
if in_channels < 4:
temp_channels = in_channels
self.conv1 = Conv2D_BatchNorm_Relu(in_channels, temp_channels, 1, 0, 1)
self.conv2 = Conv2D_BatchNorm_Relu(temp_channels, temp_channels, 3, 1, 1)
self.conv3 = Conv2D_BatchNorm_Relu(temp_channels, out_channels, 1, 0, 1, acti = self.acti)
self.residual = Conv2D_BatchNorm_Relu(in_channels, out_channels, 1, 0, 1)
def forward(self, x):
re = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if not self.acti:
return out
re = self.residual(x)
out = out + re
return out
class bottleneck_down(nn.Module):
def __init__(self, in_channels, out_channels):
super(bottleneck_down, self).__init__()
temp_channels = in_channels//4
if in_channels < 4:
temp_channels = in_channels
self.conv1 = Conv2D_BatchNorm_Relu(in_channels, temp_channels, 3, 1, 2)
self.conv2 = Conv2D_BatchNorm_Relu(temp_channels, temp_channels, 3, 1, 1, dilation=1)
#self.conv3 = Conv2D_BatchNorm_Relu(temp_channels, out_channels, 1, 0, 1)
self.conv3 = nn.Conv2d(temp_channels, out_channels, 1, padding=0, stride=1, bias=True)
#self.residual = Conv2D_BatchNorm_Relu(in_channels, out_channels, 3, 1, 2, acti=False)
self.residual = nn.MaxPool2d(2, 2)
self.dropout = nn.Dropout2d(p=0.1)
self.prelu = nn.PReLU()
def forward(self, x, residual=False):
re = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
#out = self.dropout(out)
#re = self.residual(x)
#out = out + re
if residual:
return out
else:
out = self.prelu(out)
return out
class bottleneck_up(nn.Module):
def __init__(self, in_channels, out_channels):
super(bottleneck_up, self).__init__()
temp_channels = in_channels//4
if in_channels < 4:
temp_channels = in_channels
self.conv1 = nn.Sequential( nn.ConvTranspose2d(in_channels, temp_channels, 3, 2, 1, 1),
nn.BatchNorm2d(temp_channels),
nn.PReLU() )
self.conv2 = Conv2D_BatchNorm_Relu(temp_channels, temp_channels, 3, 1, 1, dilation=1)
#self.conv3 = Conv2D_BatchNorm_Relu(temp_channels, out_channels, 1, 0, 1)
self.conv3 = nn.Conv2d(temp_channels, out_channels, 1, padding=0, stride=1, bias=True)
#self.residual = nn.ConvTranspose2d(in_channels, out_channels, 3, 2, 1, 1)
#self.residual = nn.Sequential( nn.ConvTranspose2d(in_channels, out_channels, 3, 2, 1, 1),
# nn.BatchNorm2d(out_channels),
# nn.ReLU() )
self.residual = nn.Upsample(size=None, scale_factor=2, mode='bilinear')
self.dropout = nn.Dropout2d(p=0.1)
self.prelu = nn.PReLU()
def forward(self, x):
re = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
#out = self.dropout(out)
#re = self.residual(re)
#out = out + re
#out = self.prelu(out)
return out
class bottleneck_dilation(nn.Module):
def __init__(self, in_channels, out_channels):
super(bottleneck_dilation, self).__init__()
temp_channels = in_channels//4
if in_channels < 4:
temp_channels = in_channels
self.conv1 = Conv2D_BatchNorm_Relu(in_channels, temp_channels, 1, 0, 1)
self.conv2 = Conv2D_BatchNorm_Relu(temp_channels, temp_channels, 3, 1, 1, dilation=1)
self.conv3 = nn.Conv2d(temp_channels, out_channels, 1, padding=0, stride=1, bias=True)
#self.residual = Conv2D_BatchNorm_Relu(in_channels, out_channels, 1, 0, 1)
self.dropout = nn.Dropout2d(p=0.1)
self.prelu = nn.PReLU()
def forward(self, x, residual=False):
re = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
#out = self.dropout(out)
#re = self.residual(x)
#out = out + re
if residual:
return out
else:
out = self.prelu(out)
return out
class Output(nn.Module):
def __init__(self, in_size, out_size):
super(Output, self).__init__()
self.conv1 = Conv2D_BatchNorm_Relu(in_size, in_size//2, 3, 1, 1, dilation=1)
self.conv2 = Conv2D_BatchNorm_Relu(in_size//2, in_size//4, 3, 1, 1, dilation=1)
self.conv3 = Conv2D_BatchNorm_Relu(in_size//4, out_size, 1, 0, 1, acti = False)
def forward(self, inputs):
outputs = self.conv1(inputs)
outputs = self.conv2(outputs)
outputs = self.conv3(outputs)
return outputs
class hourglass_same(nn.Module):
def __init__(self, in_channels, out_channels):
super(hourglass_same, self).__init__()
self.down1 = bottleneck_down(in_channels, out_channels)
self.down2 = bottleneck_down(out_channels, out_channels)
self.down3 = bottleneck_down(out_channels, out_channels)
self.down4 = bottleneck_down(out_channels, out_channels)
self.same1 = bottleneck_dilation(out_channels, out_channels)
self.same2 = bottleneck_dilation(out_channels, out_channels)
self.same3 = bottleneck_dilation(out_channels, out_channels)
self.same4 = bottleneck_dilation(out_channels, out_channels)
self.up1 = bottleneck_up(out_channels, out_channels)
self.up2 = bottleneck_up(out_channels, out_channels)
self.up3 = bottleneck_up(out_channels, out_channels)
self.up4 = bottleneck_up(out_channels, out_channels)
self.residual1 = bottleneck_down(out_channels, out_channels)
self.residual2 = bottleneck_down(out_channels, out_channels)
self.residual3 = bottleneck_down(out_channels, out_channels)
self.residual4 = bottleneck_down(in_channels, out_channels)
#self.residual = nn.MaxPool2d(2, 2)
self.bn = nn.BatchNorm2d(out_channels)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.bn3 = nn.BatchNorm2d(out_channels)
self.bn4 = nn.BatchNorm2d(out_channels)
self.prelu = nn.PReLU()
def forward(self, inputs):
outputs1 = self.down1(inputs) # 64*32 -> 32*16
outputs2 = self.down2(outputs1) # 32*16 -> 16*8
outputs3 = self.down3(outputs2) # 16*8 -> 8*4
outputs4 = self.down4(outputs3) # 8*4 -> 4*2
outputs = self.same1(outputs4) # 4*2 -> 4*2
feature = self.same2(outputs, True) # 4*2 -> 4*2
outputs = self.same3(self.prelu(self.bn(feature))) # 4*2 -> 4*2
outputs = self.same4(outputs, True) # 4*2 -> 4*2
outputs = self.up1( self.prelu(self.bn1(outputs + self.residual1(outputs3, True))) )
outputs = self.up2( self.prelu(self.bn2(outputs + self.residual2(outputs2, True))) )
outputs = self.up3( self.prelu(self.bn3(outputs + self.residual3(outputs1, True))) )
outputs = self.up4( self.prelu(self.bn4(outputs + self.residual4(inputs, True))) )
#outputs = self.up3( self.prelu(self.bn3(outputs)) )
#outputs = self.up4( self.prelu(self.bn4(outputs)) )
#outputs = self.prelu(outputs)
return outputs, feature
class resize_layer(nn.Module):
def __init__(self, in_channels, out_channels, acti = True):
super(resize_layer, self).__init__()
self.conv1 = Conv2D_BatchNorm_Relu(in_channels, out_channels//4, 3, 1, 2, dilation=1, acti = False)
self.conv2 = Conv2D_BatchNorm_Relu(out_channels//4, out_channels//2, 3, 1, 2, dilation=1, acti = False)
self.conv3 = Conv2D_BatchNorm_Relu(out_channels//2, out_channels//1, 3, 1, 2, dilation=1, acti = False)
self.maxpool = nn.MaxPool2d(2, 2)
self.bn1 = nn.BatchNorm2d(out_channels//4)
self.bn2 = nn.BatchNorm2d(out_channels//2)
self.bn3 = nn.BatchNorm2d(out_channels//1)
self.prelu = nn.PReLU()
def forward(self, inputs):
#re = self.maxpool(inputs)
outputs = self.conv1(inputs)
outputs = self.bn1(outputs)
#outputs = torch.cat((outputs, re),1)
outputs = self.prelu(outputs)
#re = self.maxpool(outputs)
outputs = self.conv2(outputs)
outputs = self.bn2(outputs)
#outputs = torch.cat((outputs, re),1)
outputs = self.prelu(outputs)
#re = self.maxpool(outputs)
outputs = self.conv3(outputs)
#outputs = self.bn3(outputs)
#outputs = torch.cat((outputs, re),1)
# #outputs = self.prelu(outputs)
return outputs
class hourglass_block(nn.Module):
def __init__(self, in_channels, out_channels, acti = True, input_re=True):
super(hourglass_block, self).__init__()
self.layer1 = hourglass_same(in_channels, out_channels)
self.re1 = bottleneck_dilation(out_channels, out_channels)
self.re2 = nn.Conv2d(out_channels, out_channels, 1, padding=0, stride=1, bias=True, dilation=1)
self.re3 = nn.Conv2d(1, out_channels, 1, padding=0, stride=1, bias=True, dilation=1)
self.out_confidence = Output(out_channels, 1)
self.out_offset = Output(out_channels, 2)
self.out_instance = Output(out_channels, p.feature_size)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.bn3 = nn.BatchNorm2d(1)
self.input_re = input_re
self.prelu = nn.PReLU()
self.dropout = nn.Dropout2d(p=0.1)
def forward(self, inputs):
inputs_a = self.prelu(self.bn1(inputs))
outputs, feature = self.layer1(inputs_a)
outputs_a = self.bn2(outputs)
outputs_a = self.prelu(outputs_a)
outputs_a = self.re1(outputs_a)
outputs = self.re2(outputs_a)
out_confidence = self.out_confidence(outputs_a)
out_offset = self.out_offset(outputs_a)
out_instance = self.out_instance(outputs_a)
out = self.prelu( self.bn3(out_confidence) )
out = self.re3(out)
#out = self.dropout(out)
if self.input_re:
outputs = outputs + out + inputs
else:
outputs = outputs + out
return [out_confidence, out_offset, out_instance], outputs, feature
avg_line_length: 38.35443 | max_line_length: 131 | alphanum_fraction: 0.607096
hexsha: 87d599367e2119855815b539cb6f6f9661600024 | size: 452 | ext: py | lang: Python
max_stars_repo: replierClass.py | jujinesy/pl-Steve28-lq_Python_ForKakaoBot | f613b8d7377150a5458b5738d281cfb81c82beac | ["MIT"] | max_stars_count: 7 | stars_event: 2020-10-13T14:41:21.000Z .. 2020-11-12T09:36:41.000Z
max_issues_repo: replierClass.py | jujinesy/pl-Steve28-lq_Python_ForKakaoBot | f613b8d7377150a5458b5738d281cfb81c82beac | ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: replierClass.py | jujinesy/pl-Steve28-lq_Python_ForKakaoBot | f613b8d7377150a5458b5738d281cfb81c82beac | ["MIT"] | max_forks_count: 2 | forks_event: 2020-10-30T03:39:58.000Z .. 2020-10-30T05:05:42.000Z
data = []
class replier:
global data
def __init__(self, packName, room, isDebugChat):
self.isDebugChat = isDebugChat
self.packName = packName
self.room = room
self.data = data
def clear(self):
del data[:]
def reply(self, msg):
data.append(msg)
print(data)
class KakaoLink:
global data
def send(self, room, args, type):
data.append({
'room' : room,
'args' : args,
'type' : type
})
avg_line_length: 17.384615 | max_line_length: 50 | alphanum_fraction: 0.599558
hexsha: d54930a413bdd87764aba5d721e26de74100bd1e | size: 573 | ext: py | lang: Python
max_stars_repo: config/celery_app.py | DevSusu/uudaemon | e1184a4e2f596611db3595398bb81fc5d3e6af6e | ["MIT"] | max_stars_count: null | stars_event: null .. null
max_issues_repo: config/celery_app.py | DevSusu/uudaemon | e1184a4e2f596611db3595398bb81fc5d3e6af6e | ["MIT"] | max_issues_count: 1 | issues_event: 2022-03-01T09:07:12.000Z .. 2022-03-01T09:07:12.000Z
max_forks_repo: config/celery_app.py | DevSusu/uudaemon | e1184a4e2f596611db3595398bb81fc5d3e6af6e | ["MIT"] | max_forks_count: null | forks_event: null .. null
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery("uudaemon")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
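# Illustrative sketch (not part of the original file): a task registered directly on the
# app defined above; in this project tasks would normally live in each Django app's
# tasks.py and be picked up by autodiscover_tasks(). The task below is hypothetical.
@app.task(bind=True)
def debug_task(self):
    # Prints its own request info; handy for checking that the worker is wired up.
    print(f"Request: {self.request!r}")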
avg_line_length: 31.833333 | max_line_length: 72 | alphanum_fraction: 0.78185
hexsha: dbecfb1a26b666f96f07cf6f06afbfe696ab6e35 | size: 1,612 | ext: py | lang: Python
max_stars_repo: tests/test_util.py | rweickelt/sphinx | 1a4c41a7691e8f78d42e2db221192962c53b27df | ["BSD-2-Clause"] | max_stars_count: 1 | stars_event: 2021-02-17T04:27:16.000Z .. 2021-02-17T04:27:16.000Z
max_issues_repo: tests/test_util.py | rweickelt/sphinx | 1a4c41a7691e8f78d42e2db221192962c53b27df | ["BSD-2-Clause"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: tests/test_util.py | rweickelt/sphinx | 1a4c41a7691e8f78d42e2db221192962c53b27df | ["BSD-2-Clause"] | max_forks_count: 1 | forks_event: 2018-11-29T06:45:05.000Z .. 2018-11-29T06:45:05.000Z
# -*- coding: utf-8 -*-
"""
test_util
~~~~~~~~~~~~~~~
Tests util functions.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx.util import encode_uri, split_docinfo
def test_encode_uri():
expected = (u'https://ru.wikipedia.org/wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_'
u'%D1%83%D0%BF%D1%80%D0%B0%D0%B2%D0%BB%D0%B5%D0%BD%D0%B8%D1%8F_'
u'%D0%B1%D0%B0%D0%B7%D0%B0%D0%BC%D0%B8_%D0%B4%D0%B0%D0%BD%D0%BD%D1%8B%D1%85')
uri = (u'https://ru.wikipedia.org/wiki'
u'/Система_управления_базами_данных')
assert expected == encode_uri(uri)
expected = (u'https://github.com/search?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+is%3A'
u'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
uri = (u'https://github.com/search?utf8=✓&q=is%3Aissue+is%3Aopen+is%3A'
u'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
assert expected == encode_uri(uri)
def test_splitdocinfo():
source = "Hello world.\n"
docinfo, content = split_docinfo(source)
assert docinfo == ''
assert content == 'Hello world.\n'
source = ":orphan:\n\nHello world.\n"
docinfo, content = split_docinfo(source)
assert docinfo == ':orphan:\n'
assert content == '\nHello world.\n'
source = ":author: Georg Brandl\n:title: Manual of Sphinx\n\nHello world.\n"
docinfo, content = split_docinfo(source)
assert docinfo == ':author: Georg Brandl\n:title: Manual of Sphinx\n'
assert content == '\nHello world.\n'
avg_line_length: 36.636364 | max_line_length: 93 | alphanum_fraction: 0.645161
hexsha: 6f5630a27ba643ee41f05a2373014ea8fb2ba0a0 | size: 6,459 | ext: py | lang: Python
max_stars_repo: myfitnesspaw/types.py | hooman130/myfitnesspaw | 3523b0efae2af3d590205d8ace06978ee136b8fe | ["MIT"] | max_stars_count: 2 | stars_event: 2020-09-16T02:50:28.000Z .. 2020-09-21T16:43:41.000Z
max_issues_repo: myfitnesspaw/types.py | hooman130/myfitnesspaw | 3523b0efae2af3d590205d8ace06978ee136b8fe | ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: myfitnesspaw/types.py | hooman130/myfitnesspaw | 3523b0efae2af3d590205d8ace06978ee136b8fe | ["MIT"] | max_forks_count: 1 | forks_event: 2021-10-15T16:47:18.000Z .. 2021-10-15T16:47:18.000Z
from dataclasses import dataclass
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Dict, List
import matplotlib.pyplot as plt
import numpy as np
from myfitnesspal.exercise import Exercise
from myfitnesspal.meal import Meal
from . import styles
@dataclass
class MaterializedDay:
"""
A class to hold the properties from myfitnesspal that we are working with.
"""
username: str
date: datetime.date
meals: List[Meal]
exercises: List[Exercise]
goals: Dict[str, float]
notes: Dict # currently python-myfitnesspal only scrapes food notes
water: float
measurements: Dict[str, float]
@dataclass
class Style:
bg0: str
bg1: str
bg2: str
fg0: str
fg1: str
fg2: str
text0: str
text1: str
text2: str
accent0: str
accent1: str
gray0: str
gray1: str
warning: str
error: str
@dataclass
class User:
username: str
email: str
class ProgressReport:
template_name: str = "mfp_progress_report.jinja2"
email_from: str = "Lisko Home Automation"
def __init__(
self,
user: User,
report_data,
report_style_name: str = "default",
):
self.user = user
self.data = report_data.get("data_table", None)
self.period_start_date = report_data.get("starting_date", None)
self.current_day_number = self.data[-1][0]
self.email_subject = (
f"MyfitnessPaw Progress Report (Day {self.current_day_number})"
)
self.email_to = user.email
self.end_goal = report_data.get("end_goal", None)
self.num_rows_report_tbl = report_data.get("num_rows_report_tbl", 7)
style_pallete = styles.COLOR_PALETTES.get(report_style_name)
self.style = Style(**style_pallete)
self.attachments = [self._render_progress_bar_chart()]
@property
def period_start_date(self):
return self._period_start_date
@period_start_date.setter
def period_start_date(self, value: str):
self._period_start_date = datetime.strptime(value, "%Y-%m-%d")
def get_template_data_dict(self):
current_day_number = self.data[-1][0] # first field in last table row
title = f"MyFitnessPaw Progress Report (Day {current_day_number})"
user = f"{self.user.username}".capitalize()
today = datetime.now().strftime("%d %b %Y")
generated_ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
nutrition_tbl_header = [
"day",
"date",
"cal target",
"deficit target",
"deficit actual",
"running deficit",
]
nutrition_tbl_data = self._prepare_nutrition_table()
return {
"title": title,
"user": user,
"today": today,
"nutrition_tbl_header": nutrition_tbl_header,
"nutrition_tbl_data": nutrition_tbl_data,
"generated_ts": generated_ts,
}
def get_template_style_dict(self):
return {
"title_bg_color": self.style.fg1,
"title_text_color": self.style.text2,
"article_bg_color": self.style.bg0,
"article_text_color": self.style.text2,
"table_border_color": self.style.fg1,
"table_bg_header": self.style.bg2,
"table_bg_color1": self.style.bg1,
"table_bg_color2": self.style.bg2,
"table_text_color": self.style.text2,
"footer_bg_color": self.style.text2,
"footer_text_color": self.style.text0,
"footer_link_color": self.style.accent0,
}
def _render_progress_bar_chart(self):
nutrition_tbl_data = self._prepare_nutrition_table()
yesterday_tbl_row = nutrition_tbl_data[-1]
current_date = yesterday_tbl_row[1]
deficit_actual = yesterday_tbl_row[4]
deficit_accumulated = yesterday_tbl_row[5]
if deficit_actual < 0:
deficit_remaining = (
self.end_goal - deficit_accumulated + abs(deficit_actual)
)
current_date_data = (
(
deficit_accumulated - abs(deficit_actual),
abs(deficit_actual),
deficit_remaining + deficit_actual,
),
"warning",
)
else:
deficit_remaining = self.end_goal - deficit_accumulated - deficit_actual
current_date_data = (
(
deficit_accumulated - deficit_actual,
deficit_actual,
deficit_remaining,
),
"accent0",
)
chart_data = {current_date: current_date_data}
color = list(chart_data.values())[0][1]
vals = tuple(chart_data.values())[0][0]
category_colors = [
self.style.gray1,
self.style.warning if color == "warning" else self.style.accent0,
self.style.gray0,
]
labels = list(chart_data.keys())
data = np.array(list(vals))
data_cum = data.cumsum()
fig = plt.figure(figsize=(5.5, 0.7))
ax = fig.add_subplot(111)
fig.set_facecolor("#00000000")
ax.set_axis_off()
ax.set_ymargin(0.5)
ax.set_xlim(0, np.sum(data, axis=0).max())
goals_bar = ax.barh( # noqa
labels,
width=data,
left=data_cum[:] - data,
color=category_colors,
)
our_dir = Path().absolute()
chart_dir = our_dir.joinpath(Path("tmp"))
chart_dir.mkdir(exist_ok=True)
chart_file = chart_dir.joinpath(Path("temp.png"))
plt.savefig(chart_file)
return chart_file
def _prepare_nutrition_table(self):
yesterday_str = (date.today() - timedelta(days=1)).strftime("%d-%b-%Y")
# row[4] is the deficit actual for yesterday
# we skip days where actual deficit is NULL when we prepare the table
report_window_data = [row for row in self.data if row[4] is not None]
# if report starts from today or yesterday has no entered info:
if not report_window_data or report_window_data[-1][1] != yesterday_str:
return {}
nutrition_tbl_data = report_window_data[(self.num_rows_report_tbl * -1) :]
return nutrition_tbl_data
def render(self):
pass
avg_line_length: 31.507317 | max_line_length: 84 | alphanum_fraction: 0.600248
hexsha: 2137e9e707f5797b3f9eb54d154b02ac384581d0 | size: 6,125 | ext: py | lang: Python
max_stars_repo: lux/vislib/altair/AltairRenderer.py | vyomtech/lux | 4d46ad9d98ad674508c7dadc4131b4707e4cac90 | ["Apache-2.0"] | max_stars_count: 2 | stars_event: 2021-12-05T10:11:32.000Z .. 2022-01-23T21:43:03.000Z
max_issues_repo: lux/vislib/altair/AltairRenderer.py | vyomtech/lux | 4d46ad9d98ad674508c7dadc4131b4707e4cac90 | ["Apache-2.0"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: lux/vislib/altair/AltairRenderer.py | vyomtech/lux | 4d46ad9d98ad674508c7dadc4131b4707e4cac90 | ["Apache-2.0"] | max_forks_count: null | forks_event: null .. null
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lux
import pandas as pd
from typing import Callable
from lux.vislib.altair.BarChart import BarChart
from lux.vislib.altair.ScatterChart import ScatterChart
from lux.vislib.altair.LineChart import LineChart
from lux.vislib.altair.Histogram import Histogram
from lux.vislib.altair.Heatmap import Heatmap
from lux.vislib.altair.Choropleth import Choropleth
class AltairRenderer:
"""
Renderer for Charts based on Altair (https://altair-viz.github.io/)
"""
def __init__(self, output_type="VegaLite"):
self.output_type = output_type
def __repr__(self):
return f"AltairRenderer"
def create_vis(self, vis, standalone=True):
"""
Input Vis object and return a visualization specification
Parameters
----------
vis: lux.vis.Vis
Input Vis (with data)
standalone: bool
Flag to determine if outputted code uses user-defined variable names or can be run independently
Returns
-------
chart : altair.Chart
Output Altair Chart Object
"""
# Lazy Evaluation for 2D Binning
if vis.approx:
if vis.mark == "scatter" and vis._postbin:
vis._mark = "heatmap"
lux.config.executor.execute_2D_binning(vis)
else:
# Exactly recompute the selected vis (e.g., top k) to display
lux.config.executor.execute([vis], vis._original_df, approx=False)
# If a column has a Period dtype, or contains Period objects, convert it back to Datetime
if vis.data is not None:
for attr in list(vis.data.columns):
if pd.api.types.is_period_dtype(vis.data.dtypes[attr]) or isinstance(
vis.data[attr].iloc[0], pd.Period
):
dateColumn = vis.data[attr]
vis.data[attr] = pd.PeriodIndex(dateColumn.values).to_timestamp()
if pd.api.types.is_interval_dtype(vis.data.dtypes[attr]) or isinstance(
vis.data[attr].iloc[0], pd.Interval
):
vis.data[attr] = vis.data[attr].astype(str)
if isinstance(attr, str):
if "." in attr:
attr_clause = vis.get_attr_by_attr_name(attr)[0]
# Suppress special character ".", not displayable in Altair
# attr_clause.attribute = attr_clause.attribute.replace(".", "")
vis._vis_data = vis.data.rename(columns={attr: attr.replace(".", "")})
if vis.mark == "histogram":
chart = Histogram(vis)
elif vis.mark == "bar":
chart = BarChart(vis)
elif vis.mark == "scatter":
chart = ScatterChart(vis)
elif vis.mark == "line":
chart = LineChart(vis)
elif vis.mark == "heatmap":
chart = Heatmap(vis)
elif vis.mark == "geographical":
chart = Choropleth(vis)
else:
chart = None
if chart:
if lux.config.plotting_style and (
lux.config.plotting_backend == "vegalite" or lux.config.plotting_backend == "altair"
):
chart.chart = lux.config.plotting_style(chart.chart)
if self.output_type == "VegaLite":
chart_dict = chart.chart.to_dict()
# this is a bit of a work around because altair must take a pandas dataframe and we can only generate a luxDataFrame
# chart["data"] = { "values": vis.data.to_dict(orient='records') }
# chart_dict["width"] = 160
# chart_dict["height"] = 150
chart_dict["vislib"] = "vegalite"
return chart_dict
elif self.output_type == "Altair":
import inspect
if lux.config.plotting_style:
chart.code += "\n".join(
inspect.getsource(lux.config.plotting_style).split("\n ")[1:-1]
)
chart.code += "\nchart"
chart.code = chart.code.replace("\n\t\t", "\n")
var = vis._source
if var is not None:
all_vars = []
for f_info in inspect.getouterframes(inspect.currentframe()):
local_vars = f_info.frame.f_back
if local_vars:
callers_local_vars = local_vars.f_locals.items()
possible_vars = [
var_name for var_name, var_val in callers_local_vars if var_val is var
]
all_vars.extend(possible_vars)
found_variable = [
possible_var for possible_var in all_vars if possible_var[0] != "_"
][0]
else: # if vis._source was not set when the Vis was created
found_variable = "df"
if standalone:
chart.code = chart.code.replace(
"placeholder_variable",
f"pd.DataFrame({str(vis.data.to_dict())})",
)
else:
# TODO: Placeholder (need to read dynamically via locals())
chart.code = chart.code.replace("placeholder_variable", found_variable)
return chart.code
avg_line_length: 43.439716 | max_line_length: 132 | alphanum_fraction: 0.554939
hexsha: 762314bcfab8dbfdc238723be4badd6fc3a18275 | size: 5,805 | ext: py | lang: Python
max_stars_repo: google/ads/google_ads/v0/proto/common/value_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | ["Apache-2.0"] | max_stars_count: null | stars_event: null .. null
max_issues_repo: google/ads/google_ads/v0/proto/common/value_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | ["Apache-2.0"] | max_issues_count: null | issues_event: null .. null
max_forks_repo: google/ads/google_ads/v0/proto/common/value_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | ["Apache-2.0"] | max_forks_count: null | forks_event: null .. null
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v0/proto/common/value.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v0/proto/common/value.proto',
package='google.ads.googleads.v0.common',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v0.commonB\nValueProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v0/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V0.Common\312\002\036Google\\Ads\\GoogleAds\\V0\\Common\352\002\"Google::Ads::GoogleAds::V0::Common'),
serialized_pb=_b('\n0google/ads/googleads_v0/proto/common/value.proto\x12\x1egoogle.ads.googleads.v0.common\"\x87\x01\n\x05Value\x12\x17\n\rboolean_value\x18\x01 \x01(\x08H\x00\x12\x15\n\x0bint64_value\x18\x02 \x01(\x03H\x00\x12\x15\n\x0b\x66loat_value\x18\x03 \x01(\x02H\x00\x12\x16\n\x0c\x64ouble_value\x18\x04 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x05 \x01(\tH\x00\x42\x07\n\x05valueB\xe5\x01\n\"com.google.ads.googleads.v0.commonB\nValueProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v0/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V0.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V0\\Common\xea\x02\"Google::Ads::GoogleAds::V0::Commonb\x06proto3')
)
_VALUE = _descriptor.Descriptor(
name='Value',
full_name='google.ads.googleads.v0.common.Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='boolean_value', full_name='google.ads.googleads.v0.common.Value.boolean_value', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int64_value', full_name='google.ads.googleads.v0.common.Value.int64_value', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='float_value', full_name='google.ads.googleads.v0.common.Value.float_value', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='double_value', full_name='google.ads.googleads.v0.common.Value.double_value', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='string_value', full_name='google.ads.googleads.v0.common.Value.string_value', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='google.ads.googleads.v0.common.Value.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=85,
serialized_end=220,
)
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['boolean_value'])
_VALUE.fields_by_name['boolean_value'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['int64_value'])
_VALUE.fields_by_name['int64_value'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['float_value'])
_VALUE.fields_by_name['float_value'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['double_value'])
_VALUE.fields_by_name['double_value'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['string_value'])
_VALUE.fields_by_name['string_value'].containing_oneof = _VALUE.oneofs_by_name['value']
DESCRIPTOR.message_types_by_name['Value'] = _VALUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict(
DESCRIPTOR = _VALUE,
__module__ = 'google.ads.googleads_v0.proto.common.value_pb2'
,
__doc__ = """A generic data container.
Attributes:
value:
A value.
boolean_value:
A boolean.
int64_value:
An int64.
float_value:
A float.
double_value:
A double.
string_value:
A string.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v0.common.Value)
))
_sym_db.RegisterMessage(Value)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 43
| 690
| 0.749526
|
be404df1a3076ca374670d814dc715f40782588a
| 4,644
|
py
|
Python
|
pinax/stripe/tests/__init__.py
|
proitm/pinax-stripe
|
c5b05f699fde6bd66cc633669604e5a6d1158d8a
|
[
"MIT"
] | 375
|
2015-11-14T17:43:26.000Z
|
2021-11-16T10:59:02.000Z
|
pinax/stripe/tests/__init__.py
|
proitm/pinax-stripe
|
c5b05f699fde6bd66cc633669604e5a6d1158d8a
|
[
"MIT"
] | 470
|
2015-11-15T20:41:49.000Z
|
2021-11-27T07:22:41.000Z
|
pinax/stripe/tests/__init__.py
|
proitm/pinax-stripe
|
c5b05f699fde6bd66cc633669604e5a6d1158d8a
|
[
"MIT"
] | 205
|
2015-11-17T20:25:28.000Z
|
2021-11-22T21:56:52.000Z
|
TRANSFER_CREATED_TEST_DATA = {
"created": 1348360173,
"data": {
"object": {
"amount": 455,
"currency": "usd",
"date": 1348876800,
"description": None,
"id": "tr_XXXXXXXXXXXX",
"livemode": True,
"object": "transfer",
"other_transfers": [],
"reversed": False,
"status": "paid",
"summary": {
"adjustment_count": 0,
"adjustment_fee_details": [],
"adjustment_fees": 0,
"adjustment_gross": 0,
"charge_count": 1,
"charge_fee_details": [{
"amount": 45,
"application": None,
"currency": "usd",
"description": None,
"type": "stripe_fee"
}],
"charge_fees": 45,
"charge_gross": 500,
"collected_fee_count": 0,
"collected_fee_gross": 0,
"currency": "usd",
"net": 455,
"refund_count": 0,
"refund_fees": 0,
"refund_gross": 0,
"validation_count": 0,
"validation_fees": 0
}
}
},
"id": "evt_XXXXXXXXXXXX",
"livemode": True,
"object": "event",
"pending_webhooks": 1,
"type": "transfer.created"
}
TRANSFER_CREATED_TEST_DATA2 = {
"created": 1348360173,
"data": {
"object": {
"amount": 1455,
"currency": "usd",
"date": 1348876800,
"description": None,
"id": "tr_XXXXXXXXXXX2",
"livemode": True,
"object": "transfer",
"other_transfers": [],
"status": "paid",
"reversed": False,
"summary": {
"adjustment_count": 0,
"adjustment_fee_details": [],
"adjustment_fees": 0,
"adjustment_gross": 0,
"charge_count": 1,
"charge_fee_details": [{
"amount": 45,
"application": None,
"currency": "usd",
"description": None,
"type": "stripe_fee"
}],
"charge_fees": 45,
"charge_gross": 1500,
"collected_fee_count": 0,
"collected_fee_gross": 0,
"currency": "usd",
"net": 1455,
"refund_count": 0,
"refund_fees": 0,
"refund_gross": 0,
"validation_count": 0,
"validation_fees": 0
}
}
},
"id": "evt_XXXXXXXXXXXY",
"livemode": True,
"object": "event",
"pending_webhooks": 1,
"type": "transfer.created"
}
TRANSFER_PENDING_TEST_DATA = {
"created": 1375603198,
"data": {
"object": {
"account": {
"bank_name": "BANK OF AMERICA, N.A.",
"country": "US",
"fingerprint": "xxxxxxxxxx",
"last4": "4444",
"object": "bank_account",
"validated": False
},
"amount": 941,
"currency": "usd",
"date": 1375747200,
"description": "STRIPE TRANSFER",
"fee": 0,
"fee_details": [],
"id": "tr_adlkj2l3kj23",
"livemode": True,
"object": "transfer",
"recipient": None,
"reversed": False,
"statement_descriptor": None,
"status": "pending"
}
},
"id": "evt_2l3kj232k223",
"livemode": True,
"object": "event",
"pending_webhooks": 1,
"request": None,
"type": "transfer.created"
}
PLAN_CREATED_TEST_DATA = {
"data": {
"previous_attributes": {
"name": "Old name"
},
"object": {
"interval": "month",
"amount": 50,
"id": "gold1",
"trial_period_days": None,
"livemode": True,
"statement_descriptor": None,
"interval_count": 1,
"object": "plan",
"currency": "usd",
"created": 1498573686,
"name": "Pro Plan",
"metadata": {}
}
},
"type": "plan.updated",
"request": None,
"api_version": "2017-06-05",
"object": "event",
"id": "evt_00000000000000",
"livemode": True,
"pending_webhooks": 1,
"created": 1326853478
}
| 28.84472
| 53
| 0.419035
|
e0bfcebfaa4f6f1edfaa4e4d4aae81ac6e28f2e1
| 677
|
py
|
Python
|
company_interview_qs/graph_representations.py
|
santoshmano/pybricks
|
bcb3ab80417e8e896280062494ce6c046329b7e8
|
[
"MIT"
] | null | null | null |
company_interview_qs/graph_representations.py
|
santoshmano/pybricks
|
bcb3ab80417e8e896280062494ce6c046329b7e8
|
[
"MIT"
] | null | null | null |
company_interview_qs/graph_representations.py
|
santoshmano/pybricks
|
bcb3ab80417e8e896280062494ce6c046329b7e8
|
[
"MIT"
] | null | null | null |
# Adjacency list
#
class Graph_1:
def __init__(self):
self.vertices = []
self.edges = []
# hashmap of vertex -> neighbors
Graph_2 = {}
class Graph:
def __init__(self):
self.vertices = []
class Vertex:
def __init__(self, val):
self.val = val
self.neighbors = []
def dfs(graph):
visited = set()
for v in graph.vertices:
if v not in visited:
_dfs(v, visited)
def _dfs(v, visited):
# process v and add to visited set
print(v.val)
visited.add(v)
# traverse its neighbors
for n in v.neighbors:
if n not in visited:
_dfs(n, visited)
#
# Adjacency matrix
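# A minimal sketch of an adjacency-matrix representation (illustrative addition;
# the class name Graph_3 and the explicit vertex count are assumptions, not part
# of the original notes). matrix[u][v] == 1 means there is an edge between u and v.
class Graph_3:
    def __init__(self, num_vertices):
        self.num_vertices = num_vertices
        # num_vertices x num_vertices grid of zeros (no edges yet)
        self.matrix = [[0] * num_vertices for _ in range(num_vertices)]
    def add_edge(self, u, v):
        # mark both directions for an undirected graph
        self.matrix[u][v] = 1
        self.matrix[v][u] = 1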
| 16.119048
| 38
| 0.570162
|
d49f55aa3d7a2ac8f02f6acf16caec5b5ac526b9
| 443
|
py
|
Python
|
gui/account/migrations/0013_auto_20151120_1029.py
|
alpha-zou/TAMP
|
91f0e7b08e2d6a03b541b07dd4768bf5222044dd
|
[
"MIT"
] | 1
|
2020-03-20T06:52:07.000Z
|
2020-03-20T06:52:07.000Z
|
gui/account/migrations/0013_auto_20151120_1029.py
|
alpha-zou/TAMP
|
91f0e7b08e2d6a03b541b07dd4768bf5222044dd
|
[
"MIT"
] | 1
|
2021-11-12T15:20:56.000Z
|
2021-11-12T15:20:56.000Z
|
gui/account/migrations/0013_auto_20151120_1029.py
|
alpha-zou/TAMP
|
91f0e7b08e2d6a03b541b07dd4768bf5222044dd
|
[
"MIT"
] | 3
|
2019-03-10T19:56:17.000Z
|
2020-03-20T07:00:10.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0012_auto_20151120_1025'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='uploaded_collection',
field=models.CharField(max_length=30, null=True, blank=True),
),
]
| 22.15
| 73
| 0.62754
|
31f8143105e64bd9a34fb3cf5d4c96e93973210a
| 2,071
|
py
|
Python
|
sources/models/libraries/library_reference_model.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | 1
|
2021-06-23T20:19:45.000Z
|
2021-06-23T20:19:45.000Z
|
sources/models/libraries/library_reference_model.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | null | null | null |
sources/models/libraries/library_reference_model.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | null | null | null |
#
# MIT License
#
# Copyright (c) 2020 Pablo Rodriguez Nava, @pablintino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import relationship
from models.libraries.storable_library_model import StorableLibraryModel
class LibraryReference(StorableLibraryModel):
__tablename__ = "library_ref"
id = Column(Integer, primary_key=True)
symbol_path = Column(String(300))
symbol_ref = Column(String(150))
description = Column(String(200))
def get_file_path(self):
return self.symbol_path
def get_reference(self):
return self.symbol_ref
def set_file_path(self, path):
self.symbol_path = path
def set_reference(self, reference):
self.symbol_ref = reference
# relationships
library_components = relationship("ComponentModel", back_populates='library_ref', lazy=True)
def __repr__(self):
return "LibraryReference %s %s" % (
self.symbol_path,
self.symbol_ref,
)
| 35.706897
| 96
| 0.736359
|
cf84760108af75bf1a96294e43463d944de92747
| 617
|
py
|
Python
|
pyleecan/Methods/Slot/HoleMag/comp_mass_magnet_id.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | 4
|
2017-11-27T10:14:34.000Z
|
2018-09-20T11:30:32.000Z
|
pyleecan/Methods/Slot/HoleMag/comp_mass_magnet_id.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Methods/Slot/HoleMag/comp_mass_magnet_id.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
def comp_mass_magnet_id(self, index):
"""Compute the mass of the hole magnet of the corresponding index
Parameters
----------
self : HoleMag
A HoleMag object
index : int
Index of the magnet to compute the surface
Returns
-------
Mmag: float
        Mass of the magnet [kg]
"""
mag_list = self.get_magnet_list()
if mag_list[index] is None:
return 0
else:
if mag_list[index].Lmag is None:
Lmag = self.parent.L1
else:
Lmag = mag_list[index].Lmag
return self.comp_surface_magnet_id(index) * Lmag
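# Illustrative usage sketch (not part of the original module; `hole` is assumed
# to be a HoleMag instance with at least one magnet defined):
# Mmag0 = hole.comp_mass_magnet_id(0)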
| 23.730769
| 69
| 0.583468
|
49bdaebeb8d4aa02a20737a9d39dc66d5c1f6e2b
| 6,369
|
py
|
Python
|
docs/conf.py
|
nattster/lettuce_webdriver
|
26b910ceef67d5b81030640ebbab0504bd59d643
|
[
"MIT"
] | 24
|
2015-02-04T14:49:51.000Z
|
2021-03-23T17:17:09.000Z
|
docs/conf.py
|
nattster/lettuce_webdriver
|
26b910ceef67d5b81030640ebbab0504bd59d643
|
[
"MIT"
] | 4
|
2015-07-13T22:41:22.000Z
|
2016-10-03T20:17:22.000Z
|
docs/conf.py
|
nattster/lettuce_webdriver
|
26b910ceef67d5b81030640ebbab0504bd59d643
|
[
"MIT"
] | 12
|
2015-01-24T02:05:39.000Z
|
2016-12-30T07:30:28.000Z
|
# -*- coding: utf-8 -*-
#
# pyramid_rpc documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the
# namespace that aren't pickleable (module imports are okay, they're
# removed automatically).
#
# All configuration values have a default value; values that are commented
# out serve to show the default value.
import sys, os, datetime
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
parent = os.path.dirname(os.path.dirname(__file__))
sys.path.append(os.path.abspath(parent))
wd = os.getcwd()
os.chdir(parent)
os.system('%s setup.py test -q' % sys.executable)
os.chdir(wd)
for item in os.listdir(parent):
if item.endswith('.egg'):
sys.path.append(os.path.join(parent, item))
import pkginfo
# General configuration
# ---------------------
pkg_info = pkginfo.Develop(os.path.join(os.path.dirname(__file__),'..'))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Looks for bfg's objects
intersphinx_mapping = {'http://docs.pylonsproject.org/projects/pyramid/dev': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'pyramid_rpc'
copyright = '%s, Ben Bangert <ben@groovie.org>' % datetime.datetime.now().year
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = release = pkg_info.version
# The full version, including alpha/beta/rc tags.
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# Add and use Pylons theme
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'pyramid'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'repoze.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = '.static/logo_hi.gif'
# The name of an image file (within the static path) to use as favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as
# _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option must
# be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'rpcdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, document class [howto/manual]).
latex_documents = [
('index', 'rpc.tex', 'pyramid_rpc Documentation',
'Pylons Project Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the
# top of the title page.
latex_logo = '.static/logo_hi.gif'
# For "manual" documents, if this is true, then toplevel headings are
# parts, not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| 30.473684
| 82
| 0.726645
|
d38c3d595e2bf2b095c24db6c7861973714b1b37
| 1,877
|
py
|
Python
|
sccloud/tools/net_regressor.py
|
klarman-cell-observatory/scCloud.py
|
5a04a2f22574db044d018656ac4705ec83840226
|
[
"BSD-3-Clause"
] | 3
|
2019-07-29T12:30:28.000Z
|
2019-09-20T17:15:35.000Z
|
sccloud/tools/net_regressor.py
|
klarman-cell-observatory/scCloud.py
|
5a04a2f22574db044d018656ac4705ec83840226
|
[
"BSD-3-Clause"
] | 3
|
2019-07-24T15:07:31.000Z
|
2019-08-29T13:57:36.000Z
|
sccloud/tools/net_regressor.py
|
klarman-cell-observatory/scCloud.py
|
5a04a2f22574db044d018656ac4705ec83840226
|
[
"BSD-3-Clause"
] | 3
|
2019-07-24T22:50:34.000Z
|
2020-12-08T01:19:34.000Z
|
import time
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.neural_network import MLPRegressor
import logging
logger = logging.getLogger("sccloud")
class MaxStdScaler(BaseEstimator, TransformerMixin):
def __init__(self, copy=True, factor=1.0):
self.factor = float(factor)
self.copy = copy
def fit(self, X):
X = check_array(X, copy=self.copy, estimator=self, dtype=np.float64)
self.scaler = np.max(np.std(X, axis=0, ddof=1)) / self.factor
return self
def transform(self, X):
X = check_array(X, copy=self.copy, estimator=self, dtype=np.float64)
X /= self.scaler
return X
def inverse_transform(self, X, copy=None):
if copy is None:
copy = self.copy
X = check_array(X, copy=copy, estimator=self, dtype=np.float64)
X *= self.scaler
return X
def net_train_and_predict(X_train, y_train, X_pred, alpha, random_state, verbose=False):
start_time = time.time()
scaler_x = MaxStdScaler()
X_train = scaler_x.fit_transform(X_train)
scaler_y = MaxStdScaler(factor=15.0)
y_train = scaler_y.fit_transform(y_train)
regressor = MLPRegressor(
hidden_layer_sizes=(100, 75, 50, 25),
activation="relu",
solver="sgd",
learning_rate="adaptive",
alpha=alpha,
random_state=random_state,
)
regressor.fit(X_train, y_train)
logger.info(regressor.loss_)
y_pred = scaler_y.inverse_transform(
regressor.predict(scaler_x.transform(X_pred)), copy=False
)
end_time = time.time()
if verbose:
        logger.info(
            "Deep regressor training and predicting finished. Time spent = {:.2f}s.".format(
end_time - start_time
)
)
return y_pred
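# Illustrative usage sketch (synthetic data, not part of scCloud; the shapes and
# hyperparameters below are assumptions chosen only to demonstrate the call signature).
if __name__ == "__main__":
    X_train = np.random.rand(200, 10)
    y_train = np.random.rand(200, 3)
    X_pred = np.random.rand(50, 10)
    y_pred = net_train_and_predict(
        X_train, y_train, X_pred, alpha=1e-4, random_state=0, verbose=True
    )
    print(y_pred.shape)  # expected: (50, 3)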
| 26.43662
| 91
| 0.647842
|
3c60d9e7f3c53fc98194c05c2b97608ea5a20763
| 4,880
|
py
|
Python
|
test2.py
|
CodeChefVIT/stress-detector
|
57e4c241b7a55e943ea7fabbf9cc50693c0033cc
|
[
"MIT"
] | 17
|
2020-05-15T04:04:53.000Z
|
2022-02-19T19:42:19.000Z
|
test2.py
|
CodeChefVIT/stress-detector
|
57e4c241b7a55e943ea7fabbf9cc50693c0033cc
|
[
"MIT"
] | 13
|
2020-04-16T12:19:35.000Z
|
2022-03-12T00:51:07.000Z
|
test2.py
|
CodeChefVIT/stress-detector
|
57e4c241b7a55e943ea7fabbf9cc50693c0033cc
|
[
"MIT"
] | 11
|
2020-04-15T09:43:07.000Z
|
2021-03-11T16:53:23.000Z
|
import dlib
import cv2
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from scipy.spatial import distance as dist
import imutils
from imutils import face_utils
import matplotlib.pyplot as plt
import numpy as np
class VideoCamera(object):
def __init__(cap):
#real time video capture
cap.video = cv2.VideoCapture(0)
def __del__(cap):
cap.video.release()
def get_frame(cap):
# while(True):
ret,frame = cap.video.read()
frame = cv2.flip(frame,1)
frame = imutils.resize(frame, width=500,height=500)
        # getting eye points from the facial landmarks
(lBegin, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
(rBegin, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
# getting lip points from facial landmarks
(l_lower, l_upper) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
#preprocessing the image
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
detections = detector(gray,0)
for detection in detections:
emotion= emotion_finder(detection,gray)
cv2.putText(frame, emotion, (10,10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
shape = predictor(frame,detection)
shape = face_utils.shape_to_np(shape)
leyebrow = shape[lBegin:lEnd]
reyebrow = shape[rBegin:rEnd]
openmouth = shape[l_lower:l_upper]
# figuring out convex shape
reyebrowhull = cv2.convexHull(reyebrow)
leyebrowhull = cv2.convexHull(leyebrow)
openmouthhull = cv2.convexHull(openmouth)
cv2.drawContours(frame, [reyebrowhull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [leyebrowhull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [openmouthhull], -1, (0, 255, 0), 1)
# Measuring lip distance and eye distance
lipdist = lpdist(openmouthhull[-1],openmouthhull[0])
eyedist = ebdist(leyebrow[-1],reyebrow[0])
stress_value,stress_label = normalize_values(points,eyedist, points_lip, lipdist)
#displaying stress levels and value
cv2.putText(frame, emotion, (10,10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (235, 52, 52), 2)
cv2.putText(frame,"stress value:{}".format(str(int(stress_value*100))),(10,40),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (51, 66, 232), 2)
cv2.putText(frame,"Stress level:{}".format((stress_label)),(10,60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (35, 189, 25), 2)
cv2.imshow("Frame", frame)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
def plt_show():
plot_stress=plt.plot(range(len(points)),points,'ro')
plt.title("Stress Levels")
plt.show()
return plot_stress
global points, points_lip, emotion_classifier, detector, predictor
#importing frontal facial landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
#loading the trained model
emotion_classifier = load_model("_mini_XCEPTION.102-0.66.hdf5", compile=False)
points=[]; points_lip=[]
#calculating eye distance in terms of the facial landmark
def ebdist(leye,reye):
eyedist = dist.euclidean(leye,reye)
points.append(int(eyedist))
return eyedist
# calculating lip distance using facial landmarks
def lpdist(l_lower,l_upper):
lipdist = dist.euclidean(l_lower, l_upper)
points_lip.append(int(lipdist))
return lipdist
# classifying stressed / not stressed from the detected emotion
def emotion_finder(faces,frame):
EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised","neutral"]
x,y,w,h = face_utils.rect_to_bb(faces)
frame = frame[y:y+h,x:x+w]
roi = cv2.resize(frame,(64,64))
roi = roi.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi,axis=0)
preds = emotion_classifier.predict(roi)[0]
emotion_probability = np.max(preds)
label = EMOTIONS[preds.argmax()]
if label in ['scared','sad','angry']:
label = 'Stressed'
else:
label = 'Not Stressed'
return label
#calculating stress value using the distances
def normalize_values(points,disp,points_lip,dis_lip):
normalize_value_lip = abs(dis_lip - np.min(points_lip))/abs(np.max(points_lip) - np.min(points_lip))
normalized_value_eye =abs(disp - np.min(points))/abs(np.max(points) - np.min(points))
normalized_value =( normalized_value_eye + normalize_value_lip)/2
stress_value = (np.exp(-(normalized_value)))
if stress_value>=0.65:
stress_label="High Stress"
else:
stress_label="Low Stress"
return stress_value,stress_label
#processing real time video input to display stress
| 39.354839
| 139
| 0.661475
|
39556f33663e6add04256f117dd9db9cccfef9a3
| 3,500
|
py
|
Python
|
tests/test_qurl_templatetag.py
|
eltonplima/django-qurl-templatetag
|
000bff1e5f5a9a835016ed9cd565aa3c97104c30
|
[
"MIT"
] | 31
|
2015-03-17T02:25:49.000Z
|
2021-11-04T08:19:08.000Z
|
tests/test_qurl_templatetag.py
|
mohandoz/django-qurl-templatetag
|
8a785b112437d05cb54846b79012967fee1cb534
|
[
"MIT"
] | 167
|
2016-11-15T02:12:27.000Z
|
2020-04-21T21:27:48.000Z
|
tests/test_qurl_templatetag.py
|
mohandoz/django-qurl-templatetag
|
8a785b112437d05cb54846b79012967fee1cb534
|
[
"MIT"
] | 12
|
2015-03-12T14:54:58.000Z
|
2021-06-01T14:30:52.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_qurl_templatetag
----------------------------------
Tests for `qurl_templatetag` module.
"""
from django.test import TestCase
from django.template import Template, Context, TemplateSyntaxError
from qurl_templatetag.templatetags.qurl import Qurl
class QUrlTemplateTagTestCase(TestCase):
def test_qurl_append(self):
out = Template(
'{% load qurl %}'
'{% qurl "http://sophilabs.com/?a=1" a+="2" a-="1" %}'
).render(Context())
self.assertEqual(out, 'http://sophilabs.com/?a=2')
def test_qurl_set(self):
out = Template(
'{% load qurl %}'
'{% qurl "http://sophilabs.com/?a=1" a=None b="1" %}'
).render(Context())
self.assertEqual(out, 'http://sophilabs.com/?b=1')
def test_qurl_as(self):
context = Context()
Template(
'{% load qurl %}'
'{% qurl "http://sophilabs.com/?a=1" a=None as url %}'
).render(context)
self.assertEqual(context.get('url'), 'http://sophilabs.com/')
def test_qurl_inc(self):
out = Template(
'{% load qurl %}'
'{% qurl "http://sophilabs.com/?a=1" a++ %}'
).render(Context())
self.assertEqual(out, 'http://sophilabs.com/?a=2')
out = Template(
'{% load qurl %}'
'{% qurl "http://sophilabs.com/?a=1" b++ %}'
).render(Context())
self.assertEqual(out, 'http://sophilabs.com/?a=1&b=1')
def test_qurl_dec(self):
out = Template(
'{% load qurl %}'
'{% qurl "http://sophilabs.com/?a=1" a-- %}'
).render(Context())
self.assertEqual(out, 'http://sophilabs.com/?a=0')
out = Template(
'{% load qurl %}'
'{% qurl "http://sophilabs.com/?a=1" b-- %}'
).render(Context())
self.assertEqual(out, 'http://sophilabs.com/?a=1&b=-1')
def test_malformed(self):
template = """
{% load qurl %}
{% qurl "http://sophilabs.com/?a=1" a**2 %}
"""
self.assertRaises(TemplateSyntaxError, Template, template)
class QurlTestCase(TestCase):
def test_set(self):
qurl = Qurl('http://sophilabs.com/?a=1')
qurl = qurl.set('a', 2)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=2')
qurl = qurl.set('b', 1)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=2&b=1')
qurl = qurl.set('a', 3)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?b=1&a=3')
def test_add(self):
qurl = Qurl('http://sophilabs.com/?a=1')
qurl = qurl.add('a', 2)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=1&a=2')
qurl = qurl.add('b', 9)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=1&a=2&b=9')
def test_remove(self):
qurl = Qurl('http://sophilabs.com/?a=1&a=3')
qurl = qurl.remove('a', 3)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=1')
def test_inc(self):
qurl = Qurl('http://sophilabs.com/?a=1')
qurl = qurl.inc('a', 1)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=2')
qurl = qurl.inc('b', 1)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=2&b=1')
def test_dec(self):
qurl = Qurl('http://sophilabs.com/?a=4')
qurl = qurl.dec('a', 1)
self.assertEqual(qurl.get(), 'http://sophilabs.com/?a=3')
| 29.661017
| 73
| 0.532286
|
41cbbe945de67a903f7bf235ffebbcb63b3e0c99
| 833
|
py
|
Python
|
Chapter08/8A_OpenBankAPI/props/socgen2_k_local.py
|
uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking
|
3a10a14194368478bb8b78d3d17e9c6a7b7253db
|
[
"MIT"
] | 115
|
2020-06-18T15:00:58.000Z
|
2022-03-02T10:13:19.000Z
|
Chapter08/8A_OpenBankAPI/props/socgen2_k_local.py
|
uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking
|
3a10a14194368478bb8b78d3d17e9c6a7b7253db
|
[
"MIT"
] | 9
|
2016-03-13T06:58:16.000Z
|
2021-11-30T10:06:47.000Z
|
Chapter08/8A_OpenBankAPI/props/socgen2_k_local.py
|
uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking
|
3a10a14194368478bb8b78d3d17e9c6a7b7253db
|
[
"MIT"
] | 60
|
2020-07-22T14:53:10.000Z
|
2022-03-23T10:17:59.000Z
|
# This is just for the socgen-k test; make sure the Socgen-k server is working.
# https://socgen-k-api.openbankproject.com/
# API server URL
BASE_URL = "http://127.0.0.1:8080"
API_VERSION = "v2.0.0"
API_VERSION_V210 = "v2.1.0"
# API server will redirect your browser to this URL, should be non-functional
# You will paste the redirect location here when running the script
CALLBACK_URI = 'http://127.0.0.1/cb'
# login user:
USERNAME = '1000203892'
PASSWORD = 'fffffffffffffffff'
CONSUMER_KEY = 'gmtcx4letf2isej1slxhpphtnt2jkt30ldazvkmd'
# fromAccount info:
FROM_BANK_ID = '00100'
FROM_ACCOUNT_ID = '410ad4eb-9f63-300f-8cb9-12f0ab677521'
TO_BANK_ID = '00100'
TO_ACCOUNT_ID = '410ad4eb-9f63-300f-8cb9-12f0ab677521'
# Our currency to use
OUR_CURRENCY = 'XAF'
# Our value to transfer
OUR_VALUE = '10'
OUR_VALUE_LARGE = '1001.00'
| 27.766667
| 80
| 0.753902
|
911b8570dbe4dd13160970c51c2cd287f8cc9dae
| 4,147
|
py
|
Python
|
myuw/test/views/test_rest_search.py
|
uw-it-aca/myuw
|
3fa1fabeb3c09d81a049f7c1a8c94092d612438a
|
[
"Apache-2.0"
] | 18
|
2015-02-04T01:09:11.000Z
|
2021-11-25T03:10:39.000Z
|
myuw/test/views/test_rest_search.py
|
uw-it-aca/myuw
|
3fa1fabeb3c09d81a049f7c1a8c94092d612438a
|
[
"Apache-2.0"
] | 2,323
|
2015-01-15T19:45:10.000Z
|
2022-03-21T19:57:06.000Z
|
myuw/test/views/test_rest_search.py
|
uw-it-aca/myuw
|
3fa1fabeb3c09d81a049f7c1a8c94092d612438a
|
[
"Apache-2.0"
] | 9
|
2015-01-15T19:29:26.000Z
|
2022-02-11T04:51:23.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from django.test.utils import override_settings
from django.urls import reverse
from myuw.test.api import MyuwApiTest
@override_settings(
RESTCLIENTS_ADMIN_AUTH_MODULE='rc_django.tests.can_proxy_restclient')
class RestSearchViewTest(MyuwApiTest):
def test_post(self):
self.set_user('javerage')
# hfs
url = reverse("myuw_rest_search", args=["hfs", "accounts"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, "/restclients/view/hfs/myuw/v1/javerage")
# bookstore
url = reverse("myuw_rest_search", args=["book", "index"])
response = self.client.post(url, {
"sln1": "123", "quarter": "spring", "returnlink": "t"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/book/uw/json_utf8_202007.ubs%3F"
"quarter=spring&sln1=123&returnlink=t"))
# myplan
url = reverse("myuw_rest_search", args=["myplan", "index"])
response = self.client.post(url, {
"uwregid": "ABC", "year": "2013", "quarter": "spring"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/myplan/student/api/plan/v1/2013,spring,1,ABC")
# libraries
url = reverse("myuw_rest_search", args=["libraries", "accounts"])
response = self.client.post(url, {"id": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/libraries/mylibinfo/v1/?id=javerage")
# iasystem
url = reverse("myuw_rest_search", args=[
"iasystem_uw", "uw/api/v1/evaluation"])
response = self.client.post(url, {"student_id": "123456"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/iasystem_uw/api/" +
"v1/evaluation?student_id=123456"))
# uwnetid
url = reverse("myuw_rest_search", args=["uwnetid", "password"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/javerage/password")
url = reverse("myuw_rest_search", args=["uwnetid", "subscription"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/" +
"javerage/subscription/60,64,105")
# grad
url = reverse("myuw_rest_search", args=[
"grad", "services/students/v1/api/committee"])
response = self.client.post(url, {
"id": "12345", "csrfmiddlewaretoken": "0000000"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/grad/services/" +
"students/v1/api/committee?id=12345"))
# notices
url = reverse("myuw_rest_search", args=["sws", "notices"])
response = self.client.post(url, {
"uwregid": "12345678123456781234567812345678",
"csrfmiddlewaretoken": "0000000"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/sws/student/v5/notice/" +
"12345678123456781234567812345678.json"))
# upass
url = reverse("myuw_rest_search", args=["upass", "index"])
response = self.client.post(url, {
"uwnetid": "bill",
"csrfmiddlewaretoken": "0000000"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/upass/MyUWUpass/MyUWUpass.aspx%3Fid=bill")
| 39.875
| 77
| 0.613214
|
586ad94d5bd76f1b1d97ebb93dce2b9562afad60
| 5,422
|
py
|
Python
|
password_generator.py
|
JBucho/PasswordGenerator
|
52bd12a751724189f81ca84385fc27eb986deea7
|
[
"MIT"
] | null | null | null |
password_generator.py
|
JBucho/PasswordGenerator
|
52bd12a751724189f81ca84385fc27eb986deea7
|
[
"MIT"
] | null | null | null |
password_generator.py
|
JBucho/PasswordGenerator
|
52bd12a751724189f81ca84385fc27eb986deea7
|
[
"MIT"
] | null | null | null |
# password generator
# requires at least python 3.6 to use secrets
import string
from secrets import choice
from pyperclip import copy
def password_gen_init():
    """Initialize the password generation process.
    Asks the user for password parameters (length and strength) and
    starts the password generation.
:return: string -> message containing generated password
"""
try:
length = int(input("\nEnter a passwords length: "))
strength = int(input("\nEnter a passwords strength: "))
if check_gen_conditions(length, strength):
password = generate_password(length, strength)
            copy(password)  # Copying password to clipboard
password_message = (
"\n-----------------------------"
"\nGenerated password is:\n\n"
+ f"{password}".center(30)
+ "\n\n*Your password has been also copied to clipboard."
)
return password_message
else:
            print(
                "\nYou gave invalid parameters for password generation."
                "\nPlease check the NOTEs and try again."
)
password_gen_init()
except ValueError:
        print(
            "\nError. Wrong input.\nInputs for strength and length have to be integers (numbers)."
)
password_gen_init()
def check_gen_conditions(length, strength):
    """Checks if the conditions for password generation are valid."""
if strength > 4:
strength = 4 # Reduce too high strength level to maximum level
print(
"\nNOTE: Given password strength was too high and it was reduced to maximum level, level 4."
)
if strength < 1 or length < 1:
if strength < 1:
print("\nNOTE: Given strength should be in range 1 - 4.")
if length < 1:
print(
"\nNOTE: Password length should be at least 1 (for level 1 strength)."
)
return False
elif length < strength:
        print(
            "\nNOTE: The given password length is too short for the chosen strength."
            "\n\t Length should be at least equal to strength."
)
return False
else:
return True
def generate_password(length, strength):
"""Generates password.
    Builds the password character base for the chosen strength level,
    then generates a password of the given length from it.
:param length: integer >= 1
:param strength: integer 1 - 4 inclusive
:return: string - generated password
"""
digit = string.digits
lower = string.ascii_lowercase
upper = string.ascii_uppercase
symbol = string.punctuation
while True:
if strength == 1 and length >= 1:
password_base = lower
elif strength == 2 and length >= 2:
password_base = lower + upper
elif strength == 3 and length >= 3:
password_base = lower + upper + digit
elif strength >= 4 and length >= 4:
password_base = lower + upper + digit + symbol
else:
raise ValueError(
"Something is wrong. Please double-check your arguments for generating password. "
"\nLength should be at least equal to strength."
)
password = "".join(choice(password_base) for _ in range(length))
if check_password(password, strength, length):
return password
else:
continue
def check_password(password, strength, length):
"""Checks generated password.
    Checks if the generated password is correct.
    A correct password contains at least one
    character from each character group
    of the chosen strength level.
:param password: string -> generated password
:param strength: integer 1-4
:param length: integer >= 1
:return: bool -> True if password correct, False if not.
"""
if len(password) != length:
return False
if strength == 1 and length >= 1:
if all(char.islower() for char in password):
return True
elif strength == 2 and length >= 2:
        if (
            any(char.islower() for char in password)
            and any(char.isupper() for char in password)
            and password.isalpha()
        ):
return True
elif strength == 3 and length >= 3:
if (
any(char.islower() for char in password)
and any(char.isupper() for char in password)
and sum(char.isdigit() for char in password) >= 1
):
return True
elif strength >= 4 and length >= 4:
if (
any(char.islower() for char in password)
and any(char.isupper() for char in password)
and sum(char.isdigit() for char in password) >= 1
and any(char in string.punctuation for char in password)
):
return True
return False
if __name__ == "__main__":
print(
"This program will generate a random password of given length and strength."
"\nMinimum password length for strength in scale 1-4:\n"
"\n1. only lowercase - 1 character"
"\n2. lowercase and uppercase - 2 characters"
"\n3. lowercase, uppercase and digits - 3 characters"
"\n4. lowercase, uppercase, digits and punctuation - 4 characters"
)
outcome_message = password_gen_init()
print(outcome_message)
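# Illustrative sketch (not part of the original script): the core functions can also
# be used directly, e.g. generate_password(12, 4) returns a 12-character password
# containing lowercase, uppercase, digits and punctuation, and
# check_password(generate_password(12, 4), 4, 12) evaluates to True.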
| 31.34104
| 104
| 0.598857
|
3fb4f62a33f69d7f19d1b0b9592d1b40e20a81e8
| 3,221
|
py
|
Python
|
multimodal/mmf/models/interfaces/image_models.py
|
yongkangzzz/mmfgroup
|
098a78c83e1c2973dc895d1dc7fd30d7d3668143
|
[
"MIT"
] | null | null | null |
multimodal/mmf/models/interfaces/image_models.py
|
yongkangzzz/mmfgroup
|
098a78c83e1c2973dc895d1dc7fd30d7d3668143
|
[
"MIT"
] | null | null | null |
multimodal/mmf/models/interfaces/image_models.py
|
yongkangzzz/mmfgroup
|
098a78c83e1c2973dc895d1dc7fd30d7d3668143
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# Used for MMF internal models, hateful memes task,
# make predictions on raw images and texts
import os
import tempfile
from pathlib import Path
from typing import Type, Union
import torch
import torchvision.datasets.folder as tv_helpers
from omegaconf import DictConfig
from mmf.common.sample import Sample, SampleList
from mmf.models.base_model import BaseModel
from mmf.utils.build import build_processors
from mmf.utils.download import download
from PIL import Image
from torch import nn
ImageType = Union[Type[Image.Image], str]
PathType = Union[Type[Path], str]
BaseModelType = Type[BaseModel]
class GeneralInterface(nn.Module):
def __init__(self, model: BaseModelType, config: DictConfig):
super().__init__()
self.model = model
self.config = config
self.init_processors()
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def init_processors(self):
config = self.config.dataset_config.hateful_memes
extra_params = {"data_dir": config.data_dir}
self.processor_dict = build_processors(config.processors, **extra_params)
    def classify(self, image: ImageType, text: str, image_tensor=None, zero_image=False, zero_text=False):
"""Classifies a given image and text in it into Hateful/Non-Hateful.
Image can be a url or a local path or you can directly pass a PIL.Image.Image
object. Text needs to be a sentence containing all text in the image.
        Args:
            image (ImageType): Image to be classified
            text (str): Text in the image
            image_tensor: Optional preprocessed image tensor; when given it is used
                directly instead of loading and processing ``image``
            zero_image: zero out the image features when classifying
            zero_text: zero out the text features when classifying
        Returns:
            {"label": 0, "confidence": 0.56}, or the raw softmax scores when
            ``image_tensor`` is passed
"""
sample = Sample()
        if image_tensor is not None:
sample.image = image_tensor
else:
if isinstance(image, str):
if image.startswith("http"):
temp_file = tempfile.NamedTemporaryFile()
download(image, *os.path.split(temp_file.name), disable_tqdm=True)
image = tv_helpers.default_loader(temp_file.name)
temp_file.close()
else:
image = tv_helpers.default_loader(image)
image = self.processor_dict["image_processor"](image)
sample.image = image
text = self.processor_dict["text_processor"]({"text": text})
sample.text = text["text"]
if "input_ids" in text:
sample.update(text)
sample_list = SampleList([sample])
device = next(self.model.parameters()).device
sample_list = sample_list.to(device)
output = self.model(sample_list, zero_image=zero_image, zero_text=zero_text)
scores = nn.functional.softmax(output["scores"], dim=1)
        if image_tensor is not None:
return scores
confidence, label = torch.max(scores, dim=1)
return {"label": label.item(), "confidence": confidence.item()}
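# Illustrative usage sketch (assumptions: `model` is an already-loaded MMF
# hateful-memes model, `config` is its DictConfig, and the image path and text
# below are hypothetical):
# interface = GeneralInterface(model, config)
# prediction = interface.classify("meme.png", "text written on the meme")
# print(prediction)  # e.g. {"label": 0, "confidence": 0.56}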
| 32.535354
| 108
| 0.643899
|
6b1957f293395006b8c2d8d388281c65bf9f2402
| 1,932
|
py
|
Python
|
src/crawler/crawler.py
|
BWAI-SWmaestro/BWAI_Crawler
|
b7402b9753dfcbe5189cca12f9446b09820c61d6
|
[
"MIT"
] | null | null | null |
src/crawler/crawler.py
|
BWAI-SWmaestro/BWAI_Crawler
|
b7402b9753dfcbe5189cca12f9446b09820c61d6
|
[
"MIT"
] | 1
|
2021-06-02T03:57:49.000Z
|
2021-06-02T03:57:49.000Z
|
src/crawler/crawler.py
|
BWAI-SWmaestro/BWAI_Crawler
|
b7402b9753dfcbe5189cca12f9446b09820c61d6
|
[
"MIT"
] | null | null | null |
import requests
from pymongo import *
from bs4 import BeautifulSoup
from etc.secret_info import MONGO_HOST, MONGO_ID, MONGO_PW, MONGO_DB_NAME
header = {
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)\
AppleWebKit 537.36 (KHTML, like Gecko) Chrome",
"Accept":"text/html,application/xhtml+xml,application/xml;\
    q=0.9,image/webp,*/*;q=0.8"
}
class request_crawler:
    # Constructor
def __init__(self, url):
super(request_crawler, self).__init__()
# Crawler
self.url = url
self.domain = self.url.split('/')[0] + '//' + self.url.split('/')[2]
self.target = self.url + '2'
self.url_list = []
self.page_num = 2
# DB Client
self.db_client = MongoClient('mongodb://%s:%s@%s' %(MONGO_ID, MONGO_PW, MONGO_HOST))
self.db = self.db_client['BWAI']
# return db
def getDB(self):
return self.db
    # Move to a different page number
def changePage(self, num):
self.page_num += num
self.target = self.url + str(self.page_num)
    # Fetch the current page
def getPage(self):
driver = requests.get(self.target, verify = False, headers = header).text
page = BeautifulSoup(driver, 'html.parser')
return page
    # Build the list of page URLs
def makePagelist(self, sub_url_list, num):
if num <= len(self.url_list):
return False
elif sub_url_list:
for url in sub_url_list:
self.url_list.append(self.domain + url['href'])
return True
else:
return False
    # Extract the domain
def getDomain(self):
return self.domain
    # Return the target url_list
def getURLlist(self):
return self.url_list
    # Destructor
def __del__(self):
self.db_client.close()
| 28
| 93
| 0.557453
|
5f800e2b9c2ec80d89ccc7a2715c0740f87c4247
| 1,312
|
py
|
Python
|
code/analyze_nets.py
|
tchittesh/EAS-
|
b21acfc77f43db8abbda8f0e4029389b779a05fc
|
[
"MIT"
] | null | null | null |
code/analyze_nets.py
|
tchittesh/EAS-
|
b21acfc77f43db8abbda8f0e4029389b779a05fc
|
[
"MIT"
] | null | null | null |
code/analyze_nets.py
|
tchittesh/EAS-
|
b21acfc77f43db8abbda8f0e4029389b779a05fc
|
[
"MIT"
] | null | null | null |
import json
from os.path import join, isfile
import tensorflow as tf
from expdir_monitor.expdir_monitor import ExpdirMonitor
from data_providers.utils import get_data_provider_by_name
from models.utils import get_model_by_name
net_pool_path = '../net_pool_base1/Convnet/C10+/Conv_C10+_rl_small'
id2val = json.load(open(join(net_pool_path, 'net.id2val')))
str2id = json.load(open(join(net_pool_path, 'net.str2id')))
id = list(id2val.keys())[0]
em = ExpdirMonitor(f'{net_pool_path}/#{id}')
pure = False
valid_size = -1
init = em.load_init()
print(init['layer_cascade']['layers'][0].keys())
run_config = em.load_run_config(print_info=(not pure), dataset='C10+')
run_config.renew_logs = False
if valid_size > 0:
run_config.validation_size = valid_size
data_provider = get_data_provider_by_name(run_config.dataset, run_config.get_config())
net_config, model_name = em.load_net_config(init, print_info=(not pure))
model = get_model_by_name(model_name)(em.expdir, data_provider, run_config, net_config, pure=pure)
model._count_trainable_params()
start_epoch = 1
print('Testing...')
loss, accuracy = model.test(data_provider.test, batch_size=200)
print('mean cross_entropy: %f, mean accuracy: %f' % (loss, accuracy))
json.dump({'test_loss': '%s' % loss, 'test_acc': '%s' % accuracy}, open(em.output, 'w'))
| 33.641026
| 98
| 0.762957
|
7b9060c106c1937398c29baa9383555ba4b08535
| 319
|
py
|
Python
|
redhawk/test/test_common_node_init.py
|
spranesh/Redhawk
|
e2be5a6553df8449acecee2239b60c7bca0f22bc
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2016-10-04T11:46:32.000Z
|
2017-07-09T15:23:55.000Z
|
redhawk/test/test_common_node_init.py
|
spranesh/Redhawk
|
e2be5a6553df8449acecee2239b60c7bca0f22bc
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2016-03-07T13:16:48.000Z
|
2018-03-21T00:25:04.000Z
|
redhawk/test/test_common_node_init.py
|
spranesh/Redhawk
|
e2be5a6553df8449acecee2239b60c7bca0f22bc
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2016-04-06T08:04:34.000Z
|
2020-03-17T20:59:47.000Z
|
#!/usr/bin/env python
""" Test initialising of the Node class in redhawk/common/node.py"""
from __future__ import absolute_import
import redhawk.common.node as node
def TestNodeInit():
""" Test Node cannot be initialised. """
try:
n = node.Node()
except NotImplementedError as e:
return
assert(False)
| 22.785714
| 68
| 0.717868
|
3ca26b62bb4e359a496849fe80f97b733ce61088
| 272
|
py
|
Python
|
config.py
|
Lyken17/UCF-Experiment
|
e33b11bcef5bb118daa0ebd964f2dcaac4bba883
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
Lyken17/UCF-Experiment
|
e33b11bcef5bb118daa0ebd964f2dcaac4bba883
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
Lyken17/UCF-Experiment
|
e33b11bcef5bb118daa0ebd964f2dcaac4bba883
|
[
"Apache-2.0"
] | null | null | null |
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
database_dir = "/Users/lykensyu/Documents/ucf_sports_actions/"
output_dir = "data/collection.json"
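# A minimal illustrative sketch of dotdict usage (not part of the original config):
# attribute access maps to keys, and missing keys return None because __getattr__
# falls back to dict.get.
if __name__ == "__main__":
    cfg = dotdict({"database_dir": database_dir, "output_dir": output_dir})
    print(cfg.output_dir)   # -> data/collection.json
    print(cfg.missing_key)  # -> None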
| 34
| 62
| 0.753676
|
3ee2832709a39c3f8ec0a9237c2848f0f88dafe8
| 223
|
py
|
Python
|
pythonteste/desafio23.py
|
dangiotto/Python
|
29a9d18d7595a5c21e65dafc39f7fd4c55d8971c
|
[
"MIT"
] | 1
|
2020-10-17T03:23:59.000Z
|
2020-10-17T03:23:59.000Z
|
pythonteste/desafio23.py
|
dangiotto/Python
|
29a9d18d7595a5c21e65dafc39f7fd4c55d8971c
|
[
"MIT"
] | null | null | null |
pythonteste/desafio23.py
|
dangiotto/Python
|
29a9d18d7595a5c21e65dafc39f7fd4c55d8971c
|
[
"MIT"
] | null | null | null |
n = int(input('Enter a number between 0 and 9999: '))
u = n // 1 % 10
d = n // 10 % 10
c = n // 100 % 10
m = n // 1000 % 10
print('''Analyzing the number {}
Units: {}
Tens: {}
Hundreds: {}
Thousands: {}'''.format(n, u, d, c, m))
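# Worked example: for n = 1834 the digits are u = 4, d = 3, c = 8, m = 1.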
| 22.3
| 51
| 0.533632
|
7cbd0b26be19b96d536545fbdab8c37aa311da6d
| 839
|
py
|
Python
|
test/solution_tests/CHK/test_round_5.py
|
DPNT-Sourcecode/CHK-anrv01
|
84ff4fb65a416c87f0f18a76fec14bf525c81196
|
[
"Apache-2.0"
] | null | null | null |
test/solution_tests/CHK/test_round_5.py
|
DPNT-Sourcecode/CHK-anrv01
|
84ff4fb65a416c87f0f18a76fec14bf525c81196
|
[
"Apache-2.0"
] | null | null | null |
test/solution_tests/CHK/test_round_5.py
|
DPNT-Sourcecode/CHK-anrv01
|
84ff4fb65a416c87f0f18a76fec14bf525c81196
|
[
"Apache-2.0"
] | null | null | null |
from lib.solutions.CHK.checkout_solution import checkout
class Test:
def test_group_discount_buy_3(self):
input_skus = "XYY"
total_value = checkout(input_skus)
assert total_value == 45
def test_group_discount_buy_7(self):
input_skus = "XYYXSTS"
total_value = checkout(input_skus)
assert total_value == 107
def test_group_discount_only_2_discounted(self):
input_skus = "XAY"
total_value = checkout(input_skus)
assert total_value == 87
def test_group_discount_4_all_discounted(self):
input_skus = "SSSZ"
total_value = checkout(input_skus)
assert total_value == 65
    def test_group_discount_6_all_discounted(self):
input_skus = "STXSTX"
total_value = checkout(input_skus)
assert total_value == 90
| 27.966667
| 56
| 0.675805
|
f18a298a9499da95b5d2cc69fe9bd95d077e3972
| 141
|
py
|
Python
|
week06/lecture/examples/src6/1/hello1.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
week06/lecture/examples/src6/1/hello1.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
week06/lecture/examples/src6/1/hello1.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
# get_string and print, with concatenation
from cs50 import get_string
answer = get_string("What's your name? ")
print("hello, " + answer)
| 20.142857
| 42
| 0.737589
|
e371914d51d10ee40a057149eed25367cff99329
| 2,644
|
py
|
Python
|
scripts/report_usd_vwap.py
|
tellor-io/telliot-feed-examples
|
3f825c90ad372f42c89eee0f5b54250f22ec0728
|
[
"MIT"
] | 7
|
2021-11-10T21:14:57.000Z
|
2022-03-26T07:27:23.000Z
|
scripts/report_usd_vwap.py
|
tellor-io/telliot-feed-examples
|
3f825c90ad372f42c89eee0f5b54250f22ec0728
|
[
"MIT"
] | 86
|
2021-11-09T13:12:58.000Z
|
2022-03-31T17:28:56.000Z
|
scripts/report_usd_vwap.py
|
tellor-io/telliot-feed-examples
|
3f825c90ad372f42c89eee0f5b54250f22ec0728
|
[
"MIT"
] | 2
|
2021-11-27T12:51:22.000Z
|
2022-03-12T16:38:00.000Z
|
"""Submits AMPL/USD/VWAP to TellorX on Rinkeby fifteen minutes
past each midnight."""
import asyncio
from datetime import datetime
from typing import Optional
from telliot_core.apps.telliot_config import TelliotConfig
from telliot_core.contract.contract import Contract
from telliot_core.utils.abi import rinkeby_tellor_master
from telliot_core.utils.abi import rinkeby_tellor_oracle
from telliot_feed_examples.feeds.usd_vwap import ampl_usd_vwap_feed
from telliot_feed_examples.reporters.interval import IntervalReporter
from telliot_feed_examples.utils.log import get_logger
logger = get_logger(__name__)
def get_cfg() -> TelliotConfig:
"""Get rinkeby endpoint from config
If environment variables are defined, they will override the values in config files
"""
cfg = TelliotConfig()
# Override configuration for rinkeby testnet
cfg.main.chain_id = 4
_ = cfg.get_endpoint()
return cfg
def get_master(cfg: TelliotConfig) -> Optional[Contract]:
"""Helper function for connecting to a contract at an address"""
endpoint = cfg.get_endpoint()
if not endpoint:
logger.critical("Could not connect to master contract.")
return None
endpoint.connect()
master = Contract(
address="0x657b95c228A5de81cdc3F85be7954072c08A6042",
abi=rinkeby_tellor_master, # type: ignore
node=endpoint,
private_key=cfg.main.private_key,
)
master.connect()
return master
def get_oracle(cfg: TelliotConfig) -> Optional[Contract]:
"""Helper function for connecting to a contract at an address"""
    endpoint = cfg.get_endpoint()
    if not endpoint:
        logger.critical("Could not connect to oracle contract.")
        return None
    endpoint.connect()
oracle = Contract(
address="0x07b521108788C6fD79F471D603A2594576D47477",
abi=rinkeby_tellor_oracle, # type: ignore
node=endpoint,
private_key=cfg.main.private_key,
)
oracle.connect()
return oracle
if __name__ == "__main__":
cfg = get_cfg()
master = get_master(cfg)
oracle = get_oracle(cfg)
rinkeby_endpoint = cfg.get_endpoint()
uspce_reporter = IntervalReporter(
endpoint=rinkeby_endpoint,
private_key=cfg.main.private_key,
master=master,
oracle=oracle,
datafeeds=[ampl_usd_vwap_feed],
)
# Report once UTC midnight passes
last_day = datetime.utcnow().day
while True:
day = datetime.utcnow().day
if day != last_day:
last_day = day
_ = asyncio.run(uspce_reporter.report_once()) # type: ignore
| 27.831579
| 87
| 0.704614
|
93008a31339f5cf01a5d2001fbc09c7e19912d31
| 3,164
|
py
|
Python
|
rllib/evaluation/postprocessing.py
|
carlos-aguayo/ray
|
fedbdd5dc6a47aa9cba170816f8c0950193b4fd6
|
[
"Apache-2.0"
] | 1
|
2021-04-08T12:02:58.000Z
|
2021-04-08T12:02:58.000Z
|
rllib/evaluation/postprocessing.py
|
carlos-aguayo/ray
|
fedbdd5dc6a47aa9cba170816f8c0950193b4fd6
|
[
"Apache-2.0"
] | null | null | null |
rllib/evaluation/postprocessing.py
|
carlos-aguayo/ray
|
fedbdd5dc6a47aa9cba170816f8c0950193b4fd6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import scipy.signal
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import DeveloperAPI
def discount(x: np.ndarray, gamma: float):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
class Postprocessing:
"""Constant definitions for postprocessing."""
ADVANTAGES = "advantages"
VALUE_TARGETS = "value_targets"
@DeveloperAPI
def compute_advantages(rollout: SampleBatch,
last_r: float,
gamma: float = 0.9,
lambda_: float = 1.0,
use_gae: bool = True,
use_critic: bool = True):
"""
Given a rollout, compute its value targets and the advantages.
Args:
rollout (SampleBatch): SampleBatch of a single trajectory.
last_r (float): Value estimation for last observation.
gamma (float): Discount factor.
lambda_ (float): Parameter for GAE.
use_gae (bool): Using Generalized Advantage Estimation.
use_critic (bool): Whether to use critic (value estimates). Setting
this to False will use 0 as baseline.
Returns:
SampleBatch (SampleBatch): Object with experience from rollout and
processed rewards.
"""
rollout_size = len(rollout[SampleBatch.ACTIONS])
assert SampleBatch.VF_PREDS in rollout or not use_critic, \
"use_critic=True but values not found"
assert use_critic or not use_gae, \
"Can't use gae without using a value function"
if use_gae:
vpred_t = np.concatenate(
[rollout[SampleBatch.VF_PREDS],
np.array([last_r])])
delta_t = (
rollout[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])
# This formula for the advantage comes from:
# "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438
rollout[Postprocessing.ADVANTAGES] = discount(delta_t, gamma * lambda_)
rollout[Postprocessing.VALUE_TARGETS] = (
rollout[Postprocessing.ADVANTAGES] +
rollout[SampleBatch.VF_PREDS]).copy().astype(np.float32)
else:
rewards_plus_v = np.concatenate(
[rollout[SampleBatch.REWARDS],
np.array([last_r])])
discounted_returns = discount(rewards_plus_v,
gamma)[:-1].copy().astype(np.float32)
if use_critic:
rollout[Postprocessing.ADVANTAGES] = (
discounted_returns - rollout[SampleBatch.VF_PREDS])
rollout[Postprocessing.VALUE_TARGETS] = discounted_returns
else:
rollout[Postprocessing.ADVANTAGES] = discounted_returns
rollout[Postprocessing.VALUE_TARGETS] = np.zeros_like(
rollout[Postprocessing.ADVANTAGES])
rollout[Postprocessing.ADVANTAGES] = rollout[
Postprocessing.ADVANTAGES].copy().astype(np.float32)
assert all(val.shape[0] == rollout_size for key, val in rollout.items()), \
"Rollout stacked incorrectly!"
return rollout
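# Usage sketch (hypothetical values; assumes SampleBatch accepts a plain dict of arrays):
# batch = SampleBatch({
#     SampleBatch.ACTIONS: np.array([0, 1, 0]),
#     SampleBatch.REWARDS: np.array([1.0, 0.0, 1.0]),
#     SampleBatch.VF_PREDS: np.array([0.5, 0.4, 0.6]),
# })
# batch = compute_advantages(batch, last_r=0.0, gamma=0.99, lambda_=0.95)
# print(batch[Postprocessing.ADVANTAGES], batch[Postprocessing.VALUE_TARGETS])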
| 37.666667
| 79
| 0.619469
|
a44a4231fa12b52d4dbb4a4c31e144cce9f32f67
| 491
|
py
|
Python
|
examples/null_support/client.py
|
amrhgh/django-grpc-framework
|
158e1d9001bd426410ca962e2f72b14ee3e2f935
|
[
"Apache-2.0"
] | 269
|
2020-05-06T03:22:43.000Z
|
2022-03-26T21:05:24.000Z
|
examples/null_support/client.py
|
amrhgh/django-grpc-framework
|
158e1d9001bd426410ca962e2f72b14ee3e2f935
|
[
"Apache-2.0"
] | 19
|
2020-06-03T03:46:39.000Z
|
2022-03-30T20:24:55.000Z
|
examples/null_support/client.py
|
amrhgh/django-grpc-framework
|
158e1d9001bd426410ca962e2f72b14ee3e2f935
|
[
"Apache-2.0"
] | 39
|
2020-05-27T07:23:12.000Z
|
2022-03-27T13:10:24.000Z
|
import grpc
import snippets_pb2
import snippets_pb2_grpc
from google.protobuf.struct_pb2 import NullValue
with grpc.insecure_channel('localhost:50051') as channel:
stub = snippets_pb2_grpc.SnippetControllerStub(channel)
request = snippets_pb2.Snippet(id=1, title='snippet title')
# send non-null value
# request.language.value = "python"
# send null value
request.language.null = NullValue.NULL_VALUE
response = stub.Update(request)
print(response, end='')
| 30.6875
| 63
| 0.753564
|
e3dd8e113ee5ea78f2427e00566dbe5262b6c470
| 983
|
py
|
Python
|
src/dlepard/tcpproxy.py
|
acooks/dlepard
|
9300773a566290897839b845c4ec9f2feba3e93b
|
[
"MIT"
] | 5
|
2019-04-09T10:10:23.000Z
|
2020-11-11T19:23:18.000Z
|
src/dlepard/tcpproxy.py
|
acooks/dlepard
|
9300773a566290897839b845c4ec9f2feba3e93b
|
[
"MIT"
] | 6
|
2020-11-11T20:13:40.000Z
|
2020-11-24T17:59:05.000Z
|
src/dlepard/tcpproxy.py
|
Rohde-Schwarz/dlepard
|
9300773a566290897839b845c4ec9f2feba3e93b
|
[
"MIT"
] | 2
|
2021-09-28T05:44:04.000Z
|
2021-10-04T07:41:32.000Z
|
import asyncio
import logging
log = logging.getLogger("DLEPard")
class TCPProxy(asyncio.Protocol):
def __init__(self, ipv4adr, port, addr, receive_handler, loop=None):
if loop is None:
self.loop = asyncio.get_event_loop()
else:
self.loop = loop
self.running = False
self.ip_addr = ipv4adr
self.port = port
self.addr = addr
self.transport = None # type: asyncio.Transport
self.receive_handler = receive_handler
def connection_made(self, transport):
self.transport = transport
def data_received(self, data: bytes):
self.receive_handler(data)
def send_msg(self, message):
self.transport.write(message)
async def start(self):
coro = self.loop.create_connection(
lambda: self, host=self.ip_addr, port=self.port, local_addr=(self.addr, 0)
)
await asyncio.wait_for(coro, 5)
log.debug("Started TCP proxy")
| 27.305556
| 86
| 0.63174
|
50eb0a4a5d538ccc08e42e52d6f4bd5eb4a2e81d
| 4,729
|
py
|
Python
|
model/resnet_hgap.py
|
haifangong/SYSU-HCP-at-ImageCLEF-VQA-Med-2021
|
78b4ea80d21938f2c5ca07df071e776a13fe5fb1
|
[
"MIT"
] | 3
|
2021-05-19T15:42:12.000Z
|
2022-03-30T08:00:51.000Z
|
model/resnet_hgap.py
|
Rodger-Huang/SYSU-HCP-at-ImageCLEF-VQA-Med-2021
|
78b4ea80d21938f2c5ca07df071e776a13fe5fb1
|
[
"MIT"
] | null | null | null |
model/resnet_hgap.py
|
Rodger-Huang/SYSU-HCP-at-ImageCLEF-VQA-Med-2021
|
78b4ea80d21938f2c5ca07df071e776a13fe5fb1
|
[
"MIT"
] | 1
|
2021-10-16T09:09:58.000Z
|
2021-10-16T09:09:58.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision.models import resnet50
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
class resnet50HGap(nn.Module):
def __init__(self, pretrained=True, num_classes=330):
super(resnet50HGap, self).__init__()
self.resnet50 = resnet50(pretrained=pretrained)
self.model_list = list(self.resnet50.children())
self.conv1 = self.model_list[0]
self.bn1 = self.model_list[1]
self.relu = self.model_list[2]
self.maxpool = self.model_list[3]
self.layer1 = self.model_list[4]
self.layer2 = self.model_list[5]
self.layer3 = self.model_list[6]
self.layer4 = self.model_list[7]
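# The six globally pooled feature maps have 64 + 64 + 256 + 512 + 1024 + 2048 = 3968
# channels in total, which is why the classifier below takes 3968 input features.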
self.classifier = nn.Linear(3968, num_classes)
def forward(self, x):
x = self.conv1(x)
p1 = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
p2 = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
x = self.layer1(x)
p3 = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
x = self.layer2(x)
p4 = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
x = self.layer3(x)
p5 = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
x = self.layer4(x)
p6 = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
out = torch.cat([p1, p2, p3, p4, p5, p6], 1)
out = self.classifier(out)
return out
class vgg16HGap_bn(nn.Module):
def __init__(self, pretrained=True, num_classes=330):
super(vgg16HGap_bn, self).__init__()
self.vgg = torchvision.models.vgg16_bn(pretrained=pretrained).features
self.model_list = list(self.vgg.children())
self.conv1 = nn.Sequential(*self.model_list[0:6])
self.conv2 = nn.Sequential(*self.model_list[6:13])
self.conv3 = nn.Sequential(*self.model_list[13:23])
self.conv4 = nn.Sequential(*self.model_list[23:33])
self.conv5 = nn.Sequential(*self.model_list[33:43])
self.conv6 = nn.Sequential(self.model_list[43])
self.pool1 = nn.AvgPool2d(224)
self.pool2 = nn.AvgPool2d(112)
self.pool3 = nn.AvgPool2d(56)
self.pool4 = nn.AvgPool2d(28)
self.pool5 = nn.AvgPool2d(14)
self.pool6 = nn.AvgPool2d(7)
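# The concatenated pooled features have 64 + 128 + 256 + 512 + 512 + 512 = 1984 channels,
# matching the classifier input size below.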
self.classifier = nn.Linear(1984, num_classes)
def forward(self, x):
y1 = self.conv1(x)
p1 = self.pool1(y1)
y2 = self.conv2(y1)
p2 = self.pool2(y2)
y3 = self.conv3(y2)
p3 = self.pool3(y3)
y4 = self.conv4(y3)
p4 = self.pool4(y4)
y5 = self.conv5(y4)
p5 = self.pool5(y5)
y6 = self.conv6(y5)
p6 = self.pool6(y6)
out = torch.cat([p1,p2,p3,p4,p5,p6], 1).squeeze(3)
out = out.squeeze(2)
out = self.classifier(out)
return out
class vgg19HGap(nn.Module):
def __init__(self, pretrained=True, num_classes=330):
super(vgg19HGap, self).__init__()
self.vgg = torchvision.models.vgg19(pretrained=pretrained).features
self.model_list = list(self.vgg.children())
self.conv1 = nn.Sequential(*self.model_list[0:4])
self.conv2 = nn.Sequential(*self.model_list[4:9])
self.conv3 = nn.Sequential(*self.model_list[9:18])
self.conv4 = nn.Sequential(*self.model_list[18:27])
self.conv5 = nn.Sequential(*self.model_list[27:36])
self.conv6 = nn.Sequential(self.model_list[36])
self.pool1 = nn.AvgPool2d(224)
self.pool2 = nn.AvgPool2d(112)
self.pool3 = nn.AvgPool2d(56)
self.pool4 = nn.AvgPool2d(28)
self.pool5 = nn.AvgPool2d(14)
self.pool6 = nn.AvgPool2d(7)
self.classifier = nn.Linear(1984, num_classes)
def forward(self, x):
y1 = self.conv1(x)
p1 = self.pool1(y1)
y2 = self.conv2(y1)
p2 = self.pool2(y2)
y3 = self.conv3(y2)
p3 = self.pool3(y3)
y4 = self.conv4(y3)
p4 = self.pool4(y4)
y5 = self.conv5(y4)
p5 = self.pool5(y5)
y6 = self.conv6(y5)
p6 = self.pool6(y6)
out = torch.cat([p1,p2,p3,p4,p5,p6],1).squeeze(3)
out = out.squeeze(2)
out = self.classifier(out)
return out
if __name__ == '__main__':
model = resnet50HGap(pretrained=True)
model(torch.rand(2, 3, 224, 224))
| 33.778571
| 80
| 0.562698
|
851ed24d07befc813915cbc6b6a0163608039250
| 4,570
|
py
|
Python
|
synology_api/auth.py
|
VincentRoma/synology-api
|
8894d6c2d47fc2330616428224abb35ce91e5421
|
[
"MIT"
] | null | null | null |
synology_api/auth.py
|
VincentRoma/synology-api
|
8894d6c2d47fc2330616428224abb35ce91e5421
|
[
"MIT"
] | null | null | null |
synology_api/auth.py
|
VincentRoma/synology-api
|
8894d6c2d47fc2330616428224abb35ce91e5421
|
[
"MIT"
] | null | null | null |
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
class Authentication:
def __init__(self, ip_address, port, username, password, secure=False, cert_verify=False):
self._ip_address = ip_address
self._port = port
self._username = username
self._password = password
self._sid = None
self._session_expire = True
self._verify = cert_verify
if self._verify is False:
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
schema = 'https' if secure else 'http'
self._base_url = '%s://%s:%s/webapi/' % (schema, self._ip_address, self._port)
self.full_api_list = {}
self.app_api_list = {}
def verify_cert_enabled(self):
return self._verify
def login(self, application):
login_api = 'auth.cgi?api=SYNO.API.Auth'
param = {'version': '2', 'method': 'login', 'account': self._username,
'passwd': self._password, 'session': application, 'format': 'cookie'}
if not self._session_expire:
if self._sid is not None:
self._session_expire = False
return 'User already logged'
else:
session_request = requests.get(self._base_url + login_api, param, verify=self._verify)
self._sid = session_request.json()['data']['sid']
self._session_expire = False
return 'User logging... New session started!'
def logout(self, application):
logout_api = 'auth.cgi?api=SYNO.API.Auth'
param = {'version': '2', 'method': 'logout', 'session': application}
response = requests.get(self._base_url + logout_api, param, verify=self._verify)
if response.json()['success'] is True:
self._session_expire = True
self._sid = None
return 'Logged out'
else:
self._session_expire = True
self._sid = None
return 'No valid session is open'
def get_api_list(self, app=None):
query_path = 'query.cgi?api=SYNO.API.Info'
list_query = {'version': '1', 'method': 'query', 'query': 'all'}
response = requests.get(self._base_url + query_path, list_query, verify=self._verify).json()
if app is not None:
for key in response['data']:
if app.lower() in key.lower():
self.app_api_list[key] = response['data'][key]
else:
self.full_api_list = response['data']
return
def show_api_name_list(self):
prev_key = ''
for key in self.full_api_list:
if key != prev_key:
print(key)
prev_key = key
return
def show_json_response_type(self):
for key in self.full_api_list:
for sub_key in self.full_api_list[key]:
if sub_key == 'requestFormat':
if self.full_api_list[key]['requestFormat'] == 'JSON':
print(key + ' Returns JSON data')
return
def search_by_app(self, app):
print_check = 0
for key in self.full_api_list:
if app.lower() in key.lower():
print(key)
print_check += 1
continue
if print_check == 0:
print('Not Found')
return
def request_data(self, api_name, api_path, req_param, method=None, response_json=True): # 'post' or 'get'
# Convert all booleans to lowercase strings because the Synology API expects "true" or "false"
for k, v in req_param.items():
if isinstance(v, bool):
req_param[k] = str(v).lower()
if method is None:
method = 'get'
req_param['_sid'] = self._sid
if method == 'get':
url = ('%s%s' % (self._base_url, api_path)) + '?api=' + api_name
response = requests.get(url, req_param, verify=self._verify)
if response_json is True:
return response.json()
else:
return response
elif method == 'post':
url = ('%s%s' % (self._base_url, api_path)) + '?api=' + api_name
response = requests.post(url, req_param, verify=self._verify)
if response_json is True:
return response.json()
else:
return response
@property
def sid(self):
return self._sid
@property
def base_url(self):
return self._base_url
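# Usage sketch (hypothetical host and credentials):
# auth = Authentication('192.168.1.10', 5001, 'admin', 'secret', secure=True)
# print(auth.login('FileStation'))
# auth.get_api_list()
# auth.show_api_name_list()
# print(auth.logout('FileStation'))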
| 34.360902
| 110
| 0.569803
|
5341b355d102541c4eb6d52cee0905eb8192fc13
| 3,013
|
py
|
Python
|
moi/home/migrations/0017_auto_20160508_1144.py
|
Ecotrust/F2S-MOI
|
aeb38942d6539c50f252ea3ff6fbff07aabc5088
|
[
"Apache-2.0"
] | null | null | null |
moi/home/migrations/0017_auto_20160508_1144.py
|
Ecotrust/F2S-MOI
|
aeb38942d6539c50f252ea3ff6fbff07aabc5088
|
[
"Apache-2.0"
] | 33
|
2015-05-06T00:47:20.000Z
|
2016-11-08T21:13:44.000Z
|
moi/home/migrations/0017_auto_20160508_1144.py
|
Ecotrust/F2S-MOI
|
aeb38942d6539c50f252ea3ff6fbff07aabc5088
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-05-08 11:44
from __future__ import unicode_literals
import core.models
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0016_auto_20160518_1729'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body_content',
field=wagtail.wagtailcore.fields.StreamField([(b'number_count_up', wagtail.wagtailcore.blocks.StructBlock([(b'content', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter your main content above. Do not use commas for larger numbers.', label=b'Text')), (b'numbers', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the numbers you'd like to count up - seperated by a semicolon. Do not use commas for larger numbers. Ex: 4; 51000; 15", label=b'Numbers to count', required=False)), (b'colored_text', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the content you'd like to be a different color - each set of content is seperated by a semicolon", required=False)), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter a source for the associated information.', required=False))], icon=b'order', label=b'Content and Number Counter Block')), (b'top_story', wagtail.wagtailcore.blocks.StructBlock([(b'sector', core.models.SectorChoiceBlock(help_text=b'Select the sector/top-story this aligns with')), (b'content', wagtail.wagtailcore.blocks.StructBlock([(b'content', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter your main content above. Do not use commas for larger numbers.', label=b'Text')), (b'numbers', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the numbers you'd like to count up - seperated by a semicolon. Do not use commas for larger numbers. Ex: 4; 51000; 15", label=b'Numbers to count', required=False)), (b'colored_text', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the content you'd like to be a different color - each set of content is seperated by a semicolon", required=False)), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter a source for the associated information.', required=False))])), (b'link_caption', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Add the text you would like to display that will link to the sector page', label=b'Link text')), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Display your source here', required=False))], icon=b'title', label=b'Top Story Content Block')), (b'basic_content', wagtail.wagtailcore.blocks.StructBlock([(b'content', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Add your text and/or image content above', label=b'Content Area')), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Display your source here', required=False))], icon=b'pilcrow', label=b'Basic Content Block'))], blank=True, default=None, null=True),
),
]
| 125.541667
| 2,529
| 0.759044
|
d9109d1830a24c617348f4d700ddfba22ff91114
| 5,697
|
py
|
Python
|
trade_network_classification/trade_data/img_train.py
|
gg4u/cnc_2017
|
1a5c52c3207ba131139214d14a2161af2db80a5c
|
[
"MIT"
] | null | null | null |
trade_network_classification/trade_data/img_train.py
|
gg4u/cnc_2017
|
1a5c52c3207ba131139214d14a2161af2db80a5c
|
[
"MIT"
] | null | null | null |
trade_network_classification/trade_data/img_train.py
|
gg4u/cnc_2017
|
1a5c52c3207ba131139214d14a2161af2db80a5c
|
[
"MIT"
] | 1
|
2020-12-17T14:46:01.000Z
|
2020-12-17T14:46:01.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 13 19:23:04 2017
@author: xinruyue
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os
import pickle
import numpy as np
import random
BATCH_SIZE = 100
#data
file_path = '/Users/xinruyue/xin/cnc_2017/trade_network_classification/trade_data/enriched-0-5'
print(file_path)
#prepare train data
f1_ = os.path.join(file_path,'train_data_0.pkl')
f1 = open(f1_,'rb')
content1 = pickle.load(f1,encoding='iso-8859-1')
f2_ = os.path.join(file_path,'train_data_5.pkl')
f2 = open(f2_,'rb')
content2 = pickle.load(f2,encoding='iso-8859-1')
d_train_data = content1 + content2
s1 = len(content1)
s2 = len(content2)
s = s1 + s2
d_train_labels = [0] * s1
d_labels_0 = [1] * s2
d_train_labels.extend(d_labels_0)
data_label_pair = list(zip(d_train_data, d_train_labels))
random.shuffle(data_label_pair)
train_data_t = list(zip(*data_label_pair))[0]
train_labels_t = list(zip(*data_label_pair))[1]
train_data = np.array(train_data_t).reshape((s,1,48,48))
train_labels = train_labels_t
train_size = train_data.shape[0]
train_data = torch.from_numpy(train_data)
train_labels = torch.LongTensor(train_labels)
#prepare val data
f3_ = os.path.join(file_path,'val_data_0.pkl')
f3 = open(f3_,'rb')
content3 = pickle.load(f3,encoding='iso-8859-1')
f4_ = os.path.join(file_path,'val_data_5.pkl')
f4 = open(f4_,'rb')
content4 = pickle.load(f4,encoding='iso-8859-1')
d_val_data = content3 + content4
s3 = len(content3)
s4 = len(content4)
s_ = s3 + s4
d_val_labels = [0] * s3
d_labels_1 = [1] * s4
d_val_labels.extend(d_labels_1)
val_data_label_pair = list(zip(d_val_data, d_val_labels))
random.shuffle(val_data_label_pair)
val_data_t = list(zip(*val_data_label_pair))[0]
val_labels_t = list(zip(*val_data_label_pair))[1]
val_data = np.array(val_data_t).reshape((s_,1,48,48))
val_labels = val_labels_t
val_data = torch.from_numpy(val_data)
val_labels = torch.LongTensor(val_labels)
#prepare test data
f5_ = os.path.join(file_path,'test_data_0.pkl')
f5 = open(f5_,'rb')
content5 = pickle.load(f5,encoding='iso-8859-1')
f6_ = os.path.join(file_path,'test_data_5.pkl')
f6 = open(f6_,'rb')
content6 = pickle.load(f6,encoding='iso-8859-1')
d_test_data = content5 + content6
s5 = len(content5)
s6 = len(content6)
s_ = s5 + s6
d_test_labels = [0] * s5
d_labels_2 = [1] * s6
d_test_labels.extend(d_labels_2)
test_data_label_pair = list(zip(d_test_data, d_test_labels))
random.shuffle(test_data_label_pair)
test_data_t = list(zip(*test_data_label_pair))[0]
test_labels_t = list(zip(*test_data_label_pair))[1]
test_data = np.array(test_data_t).reshape((s_,1,48,48))
test_labels = test_labels_t
test_data = torch.from_numpy(test_data)
test_labels = torch.LongTensor(test_labels)
#model
class Net(nn.Module):
def __init__(self):
super(Net,self).__init__()
self.conv1 = nn.Conv2d(1,15,5)
self.pool = nn.MaxPool2d(2,2)
self.conv2 = nn.Conv2d(15,30,5)
self.fc1 = nn.Linear(30*9*9,300)
self.fc2 = nn.Linear(300,2)
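# Input 1x48x48 -> conv1(5x5): 15x44x44 -> pool: 15x22x22 -> conv2(5x5): 30x18x18
# -> pool: 30x9x9, hence the 30*9*9 input features of fc1.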
def forward(self,x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1,30*9*9)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
net = Net()
net.double()
# loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),lr=0.01,momentum=0.9)
full_mean_los = []
ep = []
acu = []
acu_train = []
for epoch in range(30):
los = []
steps = int(train_size / BATCH_SIZE)
running_loss = 0.0
acu_t = []
for step in range(steps):
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
inputs = train_data[offset:(offset + BATCH_SIZE), :, :, :]
labels = train_labels[offset:(offset + BATCH_SIZE)]
inputs,labels = Variable(inputs),Variable(labels)
#train
optimizer.zero_grad()
train_outputs = net(inputs)
_,pred = torch.max(train_outputs.data, 1)
tol = labels.size(0)
corr = (pred == labels.data).sum()
accu = corr/tol
acu_t.append(accu)
loss = criterion(train_outputs,labels)
loss.backward()
optimizer.step()
running_loss += loss.data[0]
los.append(running_loss)
#print('[%d, %5d] loss: %3f'%(epoch+1, step, running_loss))
running_loss = 0.0
acu_t_e = np.mean(acu_t)
acu_train.append(acu_t_e)
print('One epoch training finished')
mean_loss = np.mean(los)
full_mean_los.append(mean_loss)
ep.append(epoch)
correct = 0
total = 0
val_outputs = net(Variable(val_data))
_,predicted = torch.max(val_outputs.data, 1)
total = val_labels.size(0)
correct = (predicted == val_labels).sum()
accuracy = correct/total
acu.append(accuracy)
print('Accuracy of the network on the validation images: %d %%'% (100*accuracy))
test_outputs = net(Variable(test_data))
_,test_predicted = torch.max(test_outputs.data, 1)
test_total = test_labels.size(0)
test_correct = (test_predicted == test_labels).sum()
test_accuracy = test_correct/test_total
print('Accuracy of the network on the test images: %d %%'% (100*test_accuracy))
with open('epoch.txt','w') as f1:
for each in ep:
f1.write(str(each))
f1.write('\n')
with open('loss.txt','w') as f2:
for each in full_mean_los:
f2.write(str(each))
f2.write('\n')
with open('val_acu.txt','w') as f3:
for each in acu:
f3.write(str(each))
f3.write('\n')
with open('train_acu.txt','w') as f4:
for each in acu_train:
f4.write(str(each))
f4.write('\n')
| 26.872642
| 95
| 0.677023
|
c6301c3eb68bb404ced5de84aa5ea501b52607a4
| 314
|
py
|
Python
|
rl_trainer/episode_serializer/episode_serializer.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | 3
|
2018-08-31T15:04:53.000Z
|
2019-07-13T01:11:10.000Z
|
rl_trainer/episode_serializer/episode_serializer.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | null | null | null |
rl_trainer/episode_serializer/episode_serializer.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | null | null | null |
from typeguard import typechecked
from rl_trainer.commons import Episode
class EpisodeSerializer:
def serialize(self, episode: Episode, output_fname: str) -> None:
raise NotImplementedError
class EpisodeParser:
def parse(self, episode_fname: str) -> Episode:
raise NotImplementedError
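# Illustrative subclass sketch (not part of this module): a pickle-backed serializer.
# import pickle
# class PickleEpisodeSerializer(EpisodeSerializer):
#     def serialize(self, episode: Episode, output_fname: str) -> None:
#         with open(output_fname, "wb") as f:
#             pickle.dump(episode, f)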
| 22.428571
| 69
| 0.751592
|
783dca9356a1a1541d9c0961307f1bce6fbf4700
| 94
|
py
|
Python
|
dream/game/reward.py
|
icyblade/dream
|
818e77f1c25e51f8cd966f7aa4eb1bcd4207b208
|
[
"MIT"
] | null | null | null |
dream/game/reward.py
|
icyblade/dream
|
818e77f1c25e51f8cd966f7aa4eb1bcd4207b208
|
[
"MIT"
] | null | null | null |
dream/game/reward.py
|
icyblade/dream
|
818e77f1c25e51f8cd966f7aa4eb1bcd4207b208
|
[
"MIT"
] | null | null | null |
class Reward(float):
"""Reward of Texas Hold'em.
It's just a float.
"""
pass
| 13.428571
| 31
| 0.553191
|
2cb6143ba5fa048d7742fb195c25de4b7c86b934
| 2,451
|
py
|
Python
|
Personal scripts/Physics/oribt.py
|
powplowdevs/2021-2022-Projects
|
1b704e9dbb2768a3acf271b2de87ccb28ab8b933
|
[
"MIT"
] | null | null | null |
Personal scripts/Physics/oribt.py
|
powplowdevs/2021-2022-Projects
|
1b704e9dbb2768a3acf271b2de87ccb28ab8b933
|
[
"MIT"
] | null | null | null |
Personal scripts/Physics/oribt.py
|
powplowdevs/2021-2022-Projects
|
1b704e9dbb2768a3acf271b2de87ccb28ab8b933
|
[
"MIT"
] | null | null | null |
import pygame
import pymunk
import numpy as np
import math
#Start pygame
pygame.init()
#Make display
HEIGHT = 600
WIDTH = 600
display = pygame.display.set_mode((WIDTH, HEIGHT))
#SET FPS
FPS = 50
clock = pygame.time.Clock()
#our pymunk simulation "world" or space
space = pymunk.Space()
#CONVERT PYGAME CORDS TO PYMUNK CORDS FUNCTION
def convert_cords(point):
return point[0], HEIGHT - point[1]
def gforce(p1,p2):
# Calculate the gravitational force exerted on p1 by p2.
G = 1 # Change to 6.67e-11 to use real-world values.
# Calculate distance vector between p1 and p2.
r_vec = p1.body.position-p2.body.position
# Calculate magnitude of distance vector.
r_mag = np.linalg.norm(r_vec)
# Calculate unit vector of distance vector.
r_hat = r_vec/r_mag
# Calculate force magnitude.
force_mag = G*p1.shape.mass*p2.shape.mass/r_mag**2
# Calculate force vector.
force_vec = -force_mag*r_hat
return force_vec
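# Worked example with the values used below: two bodies of mass 10 starting 50 px apart
# give force_mag = 1 * 10 * 10 / 50**2 = 0.04, directed along the line between them.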
class Ball():
def __init__(self,x,y, vel, mass, type=""):
#A body
if type == "STATIC":
self.body = pymunk.Body(body_type=pymunk.Body.STATIC)
else:
self.body = pymunk.Body()
self.body.position = x,y
self.body.velocity = vel
#A shape
self.shape = pymunk.Circle(self.body,10)
self.shape.density = 1
self.shape.mass = mass
self.shape.elasticity = 1
#add body and shape to space
space.add(self.body,self.shape)
def draw(self):
#show the circle
x,y = convert_cords(self.body.position)
pygame.draw.circle(display,(255,0,0),(int(x),int(y)), 10)
#GAME FUNCTION
def game():
planet = Ball(300,300,(-70,0),10)
moon = Ball(300, 350, (10,0), 10)
while True:
#check to see if user wants to exit
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
display.fill((255,255,255))#draw white background
#move objects
forcemtp = gforce(planet,moon)
forceptm = gforce(moon,planet)
moon.body.velocity += (forcemtp[0]*-50, forcemtp[1]*-50)
planet.body.velocity += (forceptm[0]*-50, forceptm[1]*-50)
#draw objects
planet.draw()
moon.draw()
#Update display
pygame.display.update()
#FPS TICK
clock.tick(FPS)
space.step(1/FPS)
#RUN GAME
game()
pygame.quit()
| 25.268041
| 66
| 0.616075
|
a6e35b478c90aaf64c8e8acc39655636d11c8dd6
| 1,138
|
py
|
Python
|
mandelbrot/main.py
|
marvingabler/chaos
|
7f668379318554639467e35acc56d8fb1b83ac9e
|
[
"MIT"
] | null | null | null |
mandelbrot/main.py
|
marvingabler/chaos
|
7f668379318554639467e35acc56d8fb1b83ac9e
|
[
"MIT"
] | null | null | null |
mandelbrot/main.py
|
marvingabler/chaos
|
7f668379318554639467e35acc56d8fb1b83ac9e
|
[
"MIT"
] | null | null | null |
from PIL import Image, ImageDraw
MAX_ITER = 80
def mandelbrot(c):
z = 0
n = 0
while abs(z) <= 2 and n < MAX_ITER:
z = z*z + c
n += 1
return n
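# Worked example: mandelbrot(complex(0, 0)) stays bounded and returns MAX_ITER (80),
# while mandelbrot(complex(1, 0)) escapes after z = 1, 2, 5 and returns 3.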
def print_mandelbrot():
for a in range(-10, 10, 5):
for b in range(-10, 10, 5):
c = complex(a / 10, b / 10)
print(c, mandelbrot(c))
# Image size (pixels)
WIDTH = 24000
HEIGHT = 16000
# Plot window
RE_START = -2
RE_END = 1
IM_START = -1
IM_END = 1
im = Image.new('HSV', (WIDTH, HEIGHT), (0, 0, 0))
draw = ImageDraw.Draw(im)
for x in range(0, WIDTH):
for y in range(0, HEIGHT):
# Convert pixel coordinate to complex number
c = complex(RE_START + (x / WIDTH) * (RE_END - RE_START),
IM_START + (y / HEIGHT) * (IM_END - IM_START))
# Compute the number of iterations
m = mandelbrot(c)
# The color depends on the number of iterations
hue = int(255 * m / MAX_ITER)
saturation = 255
value = 255 if m < MAX_ITER else 0
# Plot the point
draw.point([x, y], (hue, saturation, value))
im.convert("RGB").save('output.png', 'PNG')
| 23.708333
| 66
| 0.557996
|
2461fb5068422e5a47f163fe9adc0a3adbfe67aa
| 14,163
|
py
|
Python
|
BaseTools/Source/Python/Common/Uefi/Capsule/FmpCapsuleHeader.py
|
KrzysztofKoch1/edk2
|
4d621893471c6299de06aeac56f4c6cddc5c9ebe
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
BaseTools/Source/Python/Common/Uefi/Capsule/FmpCapsuleHeader.py
|
KrzysztofKoch1/edk2
|
4d621893471c6299de06aeac56f4c6cddc5c9ebe
|
[
"BSD-2-Clause"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
BaseTools/Source/Python/Common/Uefi/Capsule/FmpCapsuleHeader.py
|
KrzysztofKoch1/edk2
|
4d621893471c6299de06aeac56f4c6cddc5c9ebe
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
## @file
# Module that encodes and decodes a EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER with
# a payload.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
FmpCapsuleHeader
'''
import struct
import uuid
class FmpCapsuleImageHeaderClass (object):
# typedef struct {
# UINT32 Version;
#
# ///
# /// Used to identify device firmware targeted by this update. This guid is matched by
# /// system firmware against ImageTypeId field within a EFI_FIRMWARE_IMAGE_DESCRIPTOR
# ///
# EFI_GUID UpdateImageTypeId;
#
# ///
# /// Passed as ImageIndex in call to EFI_FIRMWARE_MANAGEMENT_PROTOCOL.SetImage ()
# ///
# UINT8 UpdateImageIndex;
# UINT8 reserved_bytes[3];
#
# ///
# /// Size of the binary update image which immediately follows this structure
# ///
# UINT32 UpdateImageSize;
#
# ///
# /// Size of the VendorCode bytes which optionally immediately follow binary update image in the capsule
# ///
# UINT32 UpdateVendorCodeSize;
#
# ///
# /// The HardwareInstance to target with this update. If value is zero it means match all
# /// HardwareInstances. This field allows update software to target only a single device in
# /// cases where there are more than one device with the same ImageTypeId GUID.
# /// This header is outside the signed data of the Authentication Info structure and
# /// therefore can be modified without changing the Auth data.
# ///
# UINT64 UpdateHardwareInstance;
# } EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER;
#
# #define EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER_INIT_VERSION 0x00000002
_StructFormat = '<I16sB3BIIQ'
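# '<I16sB3BIIQ' is little-endian: Version (4), UpdateImageTypeId (16), UpdateImageIndex (1),
# three reserved bytes, UpdateImageSize (4), UpdateVendorCodeSize (4),
# UpdateHardwareInstance (8) -- 40 bytes in total.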
_StructSize = struct.calcsize (_StructFormat)
EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER_INIT_VERSION = 0x00000002
def __init__ (self):
self._Valid = False
self.Version = self.EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER_INIT_VERSION
self.UpdateImageTypeId = uuid.UUID ('00000000-0000-0000-0000-000000000000')
self.UpdateImageIndex = 0
self.UpdateImageSize = 0
self.UpdateVendorCodeSize = 0
self.UpdateHardwareInstance = 0x0000000000000000
self.Payload = b''
self.VendorCodeBytes = b''
def Encode (self):
self.UpdateImageSize = len (self.Payload)
self.UpdateVendorCodeSize = len (self.VendorCodeBytes)
FmpCapsuleImageHeader = struct.pack (
self._StructFormat,
self.Version,
self.UpdateImageTypeId.bytes_le,
self.UpdateImageIndex,
0,0,0,
self.UpdateImageSize,
self.UpdateVendorCodeSize,
self.UpdateHardwareInstance
)
self._Valid = True
return FmpCapsuleImageHeader + self.Payload + self.VendorCodeBytes
def Decode (self, Buffer):
if len (Buffer) < self._StructSize:
raise ValueError
(Version, UpdateImageTypeId, UpdateImageIndex, r0, r1, r2, UpdateImageSize, UpdateVendorCodeSize, UpdateHardwareInstance) = \
struct.unpack (
self._StructFormat,
Buffer[0:self._StructSize]
)
if Version < self.EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER_INIT_VERSION:
raise ValueError
if UpdateImageIndex < 1:
raise ValueError
if UpdateImageSize + UpdateVendorCodeSize != len (Buffer[self._StructSize:]):
raise ValueError
self.Version = Version
self.UpdateImageTypeId = uuid.UUID (bytes_le = UpdateImageTypeId)
self.UpdateImageIndex = UpdateImageIndex
self.UpdateImageSize = UpdateImageSize
self.UpdateVendorCodeSize = UpdateVendorCodeSize
self.UpdateHardwareInstance = UpdateHardwareInstance
self.Payload = Buffer[self._StructSize:self._StructSize + UpdateImageSize]
self.VendorCodeBytes = Buffer[self._StructSize + UpdateImageSize:]
self._Valid = True
return Buffer[self._StructSize:]
def DumpInfo (self):
if not self._Valid:
raise ValueError
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER.Version = {Version:08X}'.format (Version = self.Version))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER.UpdateImageTypeId = {UpdateImageTypeId}'.format (UpdateImageTypeId = str(self.UpdateImageTypeId).upper()))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER.UpdateImageIndex = {UpdateImageIndex:08X}'.format (UpdateImageIndex = self.UpdateImageIndex))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER.UpdateImageSize = {UpdateImageSize:08X}'.format (UpdateImageSize = self.UpdateImageSize))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER.UpdateVendorCodeSize = {UpdateVendorCodeSize:08X}'.format (UpdateVendorCodeSize = self.UpdateVendorCodeSize))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER.UpdateHardwareInstance = {UpdateHardwareInstance:016X}'.format (UpdateHardwareInstance = self.UpdateHardwareInstance))
print ('sizeof (Payload) = {Size:08X}'.format (Size = len (self.Payload)))
print ('sizeof (VendorCodeBytes) = {Size:08X}'.format (Size = len (self.VendorCodeBytes)))
class FmpCapsuleHeaderClass (object):
# typedef struct {
# UINT32 Version;
#
# ///
# /// The number of drivers included in the capsule and the number of corresponding
# /// offsets stored in ItemOffsetList array.
# ///
# UINT16 EmbeddedDriverCount;
#
# ///
# /// The number of payload items included in the capsule and the number of
# /// corresponding offsets stored in the ItemOffsetList array.
# ///
# UINT16 PayloadItemCount;
#
# ///
# /// Variable length array of dimension [EmbeddedDriverCount + PayloadItemCount]
# /// containing offsets of each of the drivers and payload items contained within the capsule
# ///
# // UINT64 ItemOffsetList[];
# } EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER;
#
# #define EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER_INIT_VERSION 0x00000001
_StructFormat = '<IHH'
_StructSize = struct.calcsize (_StructFormat)
_ItemOffsetFormat = '<Q'
_ItemOffsetSize = struct.calcsize (_ItemOffsetFormat)
EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER_INIT_VERSION = 0x00000001
def __init__ (self):
self._Valid = False
self.Version = self.EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER_INIT_VERSION
self.EmbeddedDriverCount = 0
self.PayloadItemCount = 0
self._ItemOffsetList = []
self._EmbeddedDriverList = []
self._PayloadList = []
self._FmpCapsuleImageHeaderList = []
def AddEmbeddedDriver (self, EmbeddedDriver):
self._EmbeddedDriverList.append (EmbeddedDriver)
def GetEmbeddedDriver (self, Index):
if Index > len (self._EmbeddedDriverList):
raise ValueError
return self._EmbeddedDriverList[Index]
def AddPayload (self, UpdateImageTypeId, Payload = b'', VendorCodeBytes = b'', HardwareInstance = 0):
self._PayloadList.append ((UpdateImageTypeId, Payload, VendorCodeBytes, HardwareInstance))
def GetFmpCapsuleImageHeader (self, Index):
if Index >= len (self._FmpCapsuleImageHeaderList):
raise ValueError
return self._FmpCapsuleImageHeaderList[Index]
def Encode (self):
self.EmbeddedDriverCount = len (self._EmbeddedDriverList)
self.PayloadItemCount = len (self._PayloadList)
FmpCapsuleHeader = struct.pack (
self._StructFormat,
self.Version,
self.EmbeddedDriverCount,
self.PayloadItemCount
)
FmpCapsuleData = b''
Offset = self._StructSize + (self.EmbeddedDriverCount + self.PayloadItemCount) * self._ItemOffsetSize
for EmbeddedDriver in self._EmbeddedDriverList:
FmpCapsuleData = FmpCapsuleData + EmbeddedDriver
self._ItemOffsetList.append (Offset)
Offset = Offset + len (EmbeddedDriver)
Index = 1
for (UpdateImageTypeId, Payload, VendorCodeBytes, HardwareInstance) in self._PayloadList:
FmpCapsuleImageHeader = FmpCapsuleImageHeaderClass ()
FmpCapsuleImageHeader.UpdateImageTypeId = UpdateImageTypeId
FmpCapsuleImageHeader.UpdateImageIndex = Index
FmpCapsuleImageHeader.Payload = Payload
FmpCapsuleImageHeader.VendorCodeBytes = VendorCodeBytes
FmpCapsuleImageHeader.UpdateHardwareInstance = HardwareInstance
FmpCapsuleImage = FmpCapsuleImageHeader.Encode ()
FmpCapsuleData = FmpCapsuleData + FmpCapsuleImage
self._ItemOffsetList.append (Offset)
self._FmpCapsuleImageHeaderList.append (FmpCapsuleImageHeader)
Offset = Offset + len (FmpCapsuleImage)
Index = Index + 1
for Offset in self._ItemOffsetList:
FmpCapsuleHeader = FmpCapsuleHeader + struct.pack (self._ItemOffsetFormat, Offset)
self._Valid = True
return FmpCapsuleHeader + FmpCapsuleData
def Decode (self, Buffer):
if len (Buffer) < self._StructSize:
raise ValueError
(Version, EmbeddedDriverCount, PayloadItemCount) = \
struct.unpack (
self._StructFormat,
Buffer[0:self._StructSize]
)
if Version < self.EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER_INIT_VERSION:
raise ValueError
self.Version = Version
self.EmbeddedDriverCount = EmbeddedDriverCount
self.PayloadItemCount = PayloadItemCount
self._ItemOffsetList = []
self._EmbeddedDriverList = []
self._PayloadList = []
self._FmpCapsuleImageHeaderList = []
#
# Parse the ItemOffsetList values
#
Offset = self._StructSize
for Index in range (0, EmbeddedDriverCount + PayloadItemCount):
ItemOffset = struct.unpack (self._ItemOffsetFormat, Buffer[Offset:Offset + self._ItemOffsetSize])[0]
if ItemOffset >= len (Buffer):
raise ValueError
self._ItemOffsetList.append (ItemOffset)
Offset = Offset + self._ItemOffsetSize
Result = Buffer[Offset:]
#
# Parse the EmbeddedDrivers
#
for Index in range (0, EmbeddedDriverCount):
Offset = self._ItemOffsetList[Index]
if Index < (len (self._ItemOffsetList) - 1):
Length = self._ItemOffsetList[Index + 1] - Offset
else:
Length = len (Buffer) - Offset
self.AddEmbeddedDriver (Buffer[Offset:Offset + Length])
#
# Parse the Payloads that are FMP Capsule Images
#
for Index in range (EmbeddedDriverCount, EmbeddedDriverCount + PayloadItemCount):
Offset = self._ItemOffsetList[Index]
if Index < (len (self._ItemOffsetList) - 1):
Length = self._ItemOffsetList[Index + 1] - Offset
else:
Length = len (Buffer) - Offset
FmpCapsuleImageHeader = FmpCapsuleImageHeaderClass ()
FmpCapsuleImageHeader.Decode (Buffer[Offset:Offset + Length])
self.AddPayload (
FmpCapsuleImageHeader.UpdateImageTypeId,
FmpCapsuleImageHeader.Payload,
FmpCapsuleImageHeader.VendorCodeBytes
)
self._FmpCapsuleImageHeaderList.append (FmpCapsuleImageHeader)
self._Valid = True
return Result
def DumpInfo (self):
if not self._Valid:
raise ValueError
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER.Version = {Version:08X}'.format (Version = self.Version))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER.EmbeddedDriverCount = {EmbeddedDriverCount:08X}'.format (EmbeddedDriverCount = self.EmbeddedDriverCount))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER.PayloadItemCount = {PayloadItemCount:08X}'.format (PayloadItemCount = self.PayloadItemCount))
print ('EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER.ItemOffsetList = ')
for Offset in self._ItemOffsetList:
print (' {Offset:016X}'.format (Offset = Offset))
for FmpCapsuleImageHeader in self._FmpCapsuleImageHeaderList:
FmpCapsuleImageHeader.DumpInfo ()
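# Round-trip sketch (hypothetical GUID and payload):
# capsule = FmpCapsuleHeaderClass ()
# capsule.AddPayload (uuid.UUID ('11111111-2222-3333-4444-555555555555'), Payload = b'\x01\x02')
# blob = capsule.Encode ()
# parsed = FmpCapsuleHeaderClass ()
# parsed.Decode (blob)
# parsed.DumpInfo ()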
| 46.742574
| 180
| 0.613006
|
72e8d17689f321af832e4b542e248f7553c2bab0
| 8,117
|
py
|
Python
|
microbit_app/microbit_app.py
|
voltur01/remotebit
|
e114a316f0a8b62c9cdc2e87326ef11ccdb12598
|
[
"MIT"
] | 2
|
2021-04-27T06:11:33.000Z
|
2021-06-02T06:04:18.000Z
|
microbit_app/microbit_app.py
|
voltur01/remotebit
|
e114a316f0a8b62c9cdc2e87326ef11ccdb12598
|
[
"MIT"
] | null | null | null |
microbit_app/microbit_app.py
|
voltur01/remotebit
|
e114a316f0a8b62c9cdc2e87326ef11ccdb12598
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 Volodymyr Turanskyy
# https://github.com/voltur01/remotebit
from microbit import *
import music
# mbv2_begin
import gc
import radio
import speech
# mbv2_end
def escape(s: str) -> str:
return s.replace('%', '%%').replace(' ', '%20').replace('\r', '%10').replace('\n', '%13')
def unescape(s: str) -> str:
return s.replace('%13', '\n').replace('%10', '\r').replace('%20', ' ').replace('%%', '%')
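# Worked example: escape('50% off\n') -> '50%%%20off%13', and unescape applies the
# replacements in the inverse order, recovering '50% off\n'.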
# mbv2_begin
def to_bytes(msg: str) -> bytes:
return bytes([int (b) for b in unescape(msg).split()])
def from_bytes(bts: bytes) -> str:
return escape(' '.join([str(b) for b in bts]))
# mbv2_end
def confirm():
print('ok')
buttons = { 'A': button_a, 'B': button_b }
pins = [pin0, pin1, pin2, pin3, pin4, pin5, pin6, pin7, pin8, pin9, pin10,
pin11, pin12, pin13, pin14, pin15, pin16, None, None, pin19, pin20]
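# Example exchange over the serial link (illustrative): sending "display.scroll Hello"
# answers "ok", "temperature" answers the current reading, and an unknown command
# answers "ERROR: Unknown command.".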
while True:
try:
request = input()
params = request.split(' ')
cmd = params[0]
if cmd == 'pin.read_digital':
print(pins[int(params[1])].read_digital())
elif cmd == 'pin.write_digital':
pins[int(params[1])].write_digital(int(params[2]))
confirm()
elif cmd == 'pin.read_analog':
print(pins[int(params[1])].read_analog())
elif cmd == 'pin.write_analog':
pins[int(params[1])].write_analog(int(params[2]))
confirm()
elif cmd == 'pin.set_analog_period':
pins[int(params[1])].set_analog_period(int(params[2]))
confirm()
elif cmd == 'pin.set_analog_period_microseconds':
pins[int(params[1])].set_analog_period_microseconds(int(params[2]))
confirm()
elif cmd == 'pin.is_touched':
print(pins[int(params[1])].is_touched())
elif cmd == 'button.is_pressed':
print(buttons[params[1]].is_pressed())
elif cmd == 'button.was_pressed':
print(buttons[params[1]].was_pressed())
elif cmd == 'button.get_presses':
print(buttons[params[1]].get_presses())
elif cmd == 'display.clear':
display.clear()
confirm()
elif cmd == 'display.set_pixel':
display.set_pixel(int(params[1]), int(params[2]), int(params[3]))
confirm()
elif cmd == 'display.get_pixel':
print(display.get_pixel(int(params[1]), int(params[2])))
elif cmd == 'display.show':
value_type = params[1]
value = unescape(params[2])
delay = int(params[3])
wait = params[4] == 'True'
loop = params[5] == 'True'
clear = params[6] == 'True'
if value_type == 'img':
display.show(Image(value))
elif value_type == 'int':
display.show(int(value), delay, wait = wait, loop = loop, clear = clear)
elif value_type == 'fp':
display.show(float(value), delay, wait = wait, loop = loop, clear = clear)
elif value_type == 'str':
display.show(value, delay, wait = wait, loop = loop, clear = clear)
confirm()
elif cmd == 'display.scroll':
display.scroll(params[1])
confirm()
elif cmd == 'display.on':
display.on()
confirm()
elif cmd == 'display.off':
display.off()
confirm()
elif cmd == 'display.is_on':
print(display.is_on())
elif cmd == 'display.read_light_level':
print(display.read_light_level())
elif cmd == 'running_time':
print(running_time())
elif cmd == 'temperature':
print(temperature())
elif cmd == 'music.set_tempo':
music.set_tempo(ticks = int(params[1]), bpm = int(params[2]))
confirm()
elif cmd == 'music.get_tempo':
ticks, bpm = music.get_tempo()
print(str(ticks) + ' ' + str(bpm))
elif cmd == 'music.play':
music.play(unescape(params[1]).split(), pins[int(params[2])], params[3] == 'True', params[4] == 'True')
confirm()
elif cmd == 'music.pitch':
music.pitch(int(params[1]), int(params[2]), pins[int(params[3])], params[4] == 'True')
confirm()
elif cmd == 'music.stop':
music.stop(pins[int(params[1])])
confirm()
elif cmd == 'music.reset':
music.reset()
confirm()
# mbv2_begin
elif cmd == 'a.get_x':
print(accelerometer.get_x())
elif cmd == 'a.get_y':
print(accelerometer.get_y())
elif cmd == 'a.get_z':
print(accelerometer.get_z())
elif cmd == 'a.get_values':
x, y, z = accelerometer.get_values()
print(str(x) + ' ' + str(y) + ' ' + str(z))
elif cmd == 'a.current_gesture':
print(accelerometer.current_gesture())
elif cmd == 'a.is_gesture':
print(accelerometer.is_gesture(params[1]))
elif cmd == 'a.was_gesture':
print(accelerometer.was_gesture(params[1]))
elif cmd == 'a.get_gestures':
print(' '.join(accelerometer.get_gestures()))
elif cmd == 'compass.calibrate':
compass.calibrate()
confirm()
elif cmd == 'compass.is_calibrated':
print(compass.is_calibrated())
elif cmd == 'compass.clear_calibration':
compass.clear_calibration()
confirm()
elif cmd == 'compass.get_x':
print(compass.get_x())
elif cmd == 'compass.get_y':
print(compass.get_y())
elif cmd == 'compass.get_z':
print(compass.get_z())
elif cmd == 'compass.heading':
print(compass.heading())
elif cmd == 'compass.get_field_strength':
print(compass.get_field_strength())
elif cmd == 'i2c.init':
i2c.init(int(params[1]), pins[int(params[2])], pins[int(params[3])])
confirm()
elif cmd == 'i2c.scan':
print(' '.join([str(a) for a in i2c.scan()]))
elif cmd == 'i2c.read':
print(i2c.read(int(params[1]), int(params[2]), params[3] == 'True').hex())
elif cmd == 'i2c.write':
i2c.write(bytes.fromhex(params[1]), params[2] == 'True')
confirm()
elif cmd == 'radio.on':
radio.on()
confirm()
elif cmd == 'radio.off':
radio.off()
confirm()
elif cmd == 'radio.reset':
radio.reset()
confirm()
elif cmd == 'radio.send_bytes':
radio.send_bytes(to_bytes(params[1]))
confirm()
elif cmd == 'radio.receive_bytes':
msg = radio.receive_bytes()
print(from_bytes(msg) if msg else '')
elif cmd == 'speech.translate':
print(escape(speech.translate(unescape(params[1]))))
elif cmd == 'speech.pronounce':
speech.pronounce(unescape(params[1]), \
pitch=int(params[2]), speed=int(params[3]), \
mouth=int(params[4]), throat=int(params[5]))
confirm()
elif cmd == 'speech.say':
gc.collect()
speech.say(unescape(params[1]), \
pitch=int(params[2]), speed=int(params[3]), \
mouth=int(params[4]), throat=int(params[5]))
confirm()
elif cmd == 'speech.sing':
speech.sing(unescape(params[1]), \
pitch=int(params[2]), speed=int(params[3]), \
mouth=int(params[4]), throat=int(params[5]))
confirm()
elif cmd == 'speaker.on':
speaker.on()
confirm()
elif cmd == 'speaker.off':
speaker.off()
confirm()
elif cmd == 'microphone.sound_level':
print(microphone.sound_level())
# mbv2_end
else:
print('ERROR: Unknown command.')
except Exception as e:
print('EXCEPTION: ' + str(e))
| 38.837321
| 115
| 0.521868
|
e4a52c990adb23aef82aa59eab6f255c18c4e7ea
| 3,698
|
py
|
Python
|
eclipse/layers/layer2_d1.py
|
imlegend19/MDSN-DevRank
|
bb1b71f72d2fb97044a62e8e0152dadb88de6411
|
[
"MIT"
] | null | null | null |
eclipse/layers/layer2_d1.py
|
imlegend19/MDSN-DevRank
|
bb1b71f72d2fb97044a62e8e0152dadb88de6411
|
[
"MIT"
] | null | null | null |
eclipse/layers/layer2_d1.py
|
imlegend19/MDSN-DevRank
|
bb1b71f72d2fb97044a62e8e0152dadb88de6411
|
[
"MIT"
] | null | null | null |
import pickle
from itertools import permutations
import networkx as nx
import openpyxl
from local_settings_eclipse import db
"""
Layer 2 Network:
An edge is added between two developers who commented on bugs that belong to the same product.
Dataset Used : eclipse
Table : test_bugs_fixed_closed, test_longdescs_fixed_closed
"""
with db:
print("Connected to db!")
cur = db.cursor()
print("Fetching developers...")
cur.execute("SELECT who FROM who_commenting_on_more_than_10_bugs")
dev = []
for i in cur.fetchall():
dev.append(i[0])
cur.execute("select distinct who from test_longdescs_fixed_closed")
filtered_who = []
for i in cur.fetchall():
filtered_who.append(i[0])
cur.execute("SELECT distinctrow product_id, bug_id from test_bugs_fixed_closed")
product_bug = {}
for i in cur.fetchall():
if i[0] in product_bug.keys():
val = product_bug[i[0]]
val.add(i[1])
product_bug[i[0]] = val
else:
product_bug[i[0]] = {i[1]}
cur.execute("SELECT distinctrow bug_id, who from test_longdescs_fixed_closed")
bug_who = {}
for i in cur.fetchall():
if i[1] in filtered_who:
if i[0] in bug_who.keys():
if i[1] in dev:
val = bug_who[i[0]]
val.add(i[1])
bug_who[i[0]] = val
else:
if i[1] in dev:
bug_who[i[0]] = {i[1]}
product_who = {}
for i in product_bug:
val = product_bug[i]
who_s = set()
for j in val:
try:
for k in bug_who[j]:
who_s.add(k)
except KeyError:
pass
product_who[i] = who_s
print("Setting up edges_normal...")
edges = set()
for i in product_who.values():
if len(list(i)) > 1:
edg = list(permutations(list(i), 2))
for j in edg:
if j[0] == j[1]:
print('err')
edges.add(j)
with open('layer2_edges_fc.txt', 'wb') as file:
pickle.dump(edges, file)
print("Saved edges_normal! Total edges_normal:", len(edges))
graph = nx.DiGraph()
graph.add_edges_from(list(edges))
neighbours = {}
for i in list(graph.nodes):
lst = list(graph.neighbors(i))
neighbours[i] = lst
print(neighbours)
path = "/home/niit1/PycharmProjects/Data-Mining-Research/eclipse/neighbours/definition_1/"
with open(path + "layer_2_neighbours.txt", 'wb') as fp:
pickle.dump(neighbours, fp)
# degrees = {}
# for (node, val) in graph.degree:
# degrees[node] = val
#
# print(degrees)
#
# path = "/home/imlegend19/PycharmProjects/Research - Data Mining/eclipse/degree_centrality/definition_1/"
# with open(path + "layer_2_degree.txt", 'wb') as fp:
# pickle.dump(degrees, fp)
print("Calculating eigenvector centrality...")
centrality = nx.eigenvector_centrality(graph)
ec = sorted(('{:0.5f}'.format(c), v) for v, c in centrality.items())
ec.reverse()
who_centrality = {}
for i in ec:
who_centrality[i[1]] = i[0]
with open("l2_d1_centrality.txt", 'wb') as fp:
pickle.dump(who_centrality, fp)
print("Setting up excel sheet...")
wb = openpyxl.Workbook()
sheet = wb.active
sheet.append(["Rank", "Id", "Centrality"])
sheet.append(["", "", ""])
print("Ranking developers...")
rank = 1
for i in ec:
sheet.append([str(rank), i[1], i[0]])
rank += 1
print("Saving...")
wb.save("layer2_ranks_fc.xlsx")
print("Process Competed!")
| 26.042254
| 110
| 0.572742
|
20aa73712d5e3cba61fd0a612dcf20d5db5a8f42
| 3,471
|
py
|
Python
|
pyBAScloudAPI/tests/setpoint_tests.py
|
bascloud/BASCloudAPI
|
6a06d430720e99204f84f5362b4f22d7d4a72b76
|
[
"MIT"
] | 3
|
2021-04-30T07:44:11.000Z
|
2021-05-03T06:35:01.000Z
|
pyBAScloudAPI/tests/setpoint_tests.py
|
bascloud/BASCloudAPI
|
6a06d430720e99204f84f5362b4f22d7d4a72b76
|
[
"MIT"
] | 9
|
2021-06-23T04:21:51.000Z
|
2022-01-17T04:15:06.000Z
|
pyBAScloudAPI/tests/setpoint_tests.py
|
bascloud/BAScloudAPI
|
6a06d430720e99204f84f5362b4f22d7d4a72b76
|
[
"MIT"
] | null | null | null |
import unittest
import time
import datetime
from definitions import *
import pyBAScloudAPI as api
BCAPI = api.EntityContext(BASCLOUD_TEST_URL)
def errorHandler(e, j):
raise e
def authenticate():
currentDateTime = int(time.time())
print("CURRENT DATETIME: ", currentDateTime)
currentDateTimeString = datetime.datetime.fromtimestamp(currentDateTime).strftime("%FT%T.000Z")
print("CURRENT DATETIME STR: ", currentDateTimeString)
BCAPI.authenticateWithUserLogin(BASCLOUD_TEST_EMAIL, BASCLOUD_TEST_PASS)
print("TOKEN: ", BCAPI.getToken())
print("EXPIRATION DATE: ", datetime.datetime.fromtimestamp(BCAPI.getTokenExpirationDate()).strftime("%FT%T.000Z"))
class TestSetPointCollection(unittest.TestCase):
def test_setpoint(self):
print("\tRequesting all setpoints...")
setpoints = BCAPI.getSetPointsCollection(BASCLOUD_TEST_TENANT_UUID, errorHandler=errorHandler)
print("\t\tOK.")
print("\tFound setpoints: ", len(setpoints[0]))
self.assertTrue(len(setpoints[0]) >= 0)  # there may be no setpoints available
class TestSingleSetPoint(unittest.TestCase):
def test_setpoint(self):
print("\tRequesting single setpoint with UUID...")
setpoint = BCAPI.getSetPoint(BASCLOUD_TEST_TENANT_UUID, BASCLOUD_TEST_STEPOINT_UUID)
print("\t\tOK.")
print("\tSetpoint UUID: ", setpoint.uuid)
print("\tSetpoint Value: ", setpoint.value)
print("\tSetpoint timestamp: ", datetime.datetime.fromtimestamp(setpoint.timestamp).strftime("%FT%T.000Z"))
print("\tSetpoint created at: ", datetime.datetime.fromtimestamp(setpoint.createdAt).strftime("%FT%T.000Z"))
print("\tSetpoint updated at: ", datetime.datetime.fromtimestamp(setpoint.updatedAt).strftime("%FT%T.000Z"))
self.assertEqual(setpoint.uuid, BASCLOUD_TEST_STEPOINT_UUID)
self.assertEqual(setpoint.value, 2345.67)
self.assertTrue(setpoint.timestamp > 0)
self.assertTrue(setpoint.createdAt > 0)
self.assertTrue(setpoint.updatedAt > 0)
class TestCreateAndDeleteSetPoint(unittest.TestCase):
def test_setpoint(self):
currentDateTime = int(time.time())
new_set = BCAPI.createSetPoint(BASCLOUD_TEST_TENANT_UUID, BASCLOUD_TEST_DEVICE_UUID, 2345.67, currentDateTime)
print("\t\tOK.")
new_uuid = new_set.uuid
print("\t\tUUID: ", new_uuid)
self.assertEqual(new_set.value, 2345.67)
self.assertEqual(new_set.timestamp, currentDateTime)
# self.assertTrue(new_set.createdAt-(2*60*60) >= currentDateTime) // TODO ML: this is wrong at the moment, for some reason the backend returns timestamps -2hours
# self.assertTrue(new_set.updatedAt-(2*60*60) >= currentDateTime)
print("\tAgain Requesting all setpoints...") # Thus deactivating/inactivating them
print("\tFrom: ", datetime.datetime.fromtimestamp(currentDateTime-(60*60*24)).strftime("%FT%T.000Z"))
print("\tUntil: ", datetime.datetime.fromtimestamp(currentDateTime+(60*60*24)).strftime("%FT%T.000Z"))
setpoints = BCAPI.getSetPointsCollection(BASCLOUD_TEST_TENANT_UUID, from_=currentDateTime-(60*60*24), until=currentDateTime+(60*60*24), errorHandler=errorHandler)
print("\t\tOK.")
print("\tFound setpoints: ", len(setpoints[0]))
self.assertTrue(len(setpoints[0]) >= 1)  # at least the setpoint created above should be found
if __name__ == '__main__':
authenticate()
unittest.main()
| 36.925532
| 170
| 0.706425
|
06b4083ca1ee22bb6f067cf8606b3a7234c4bcb7
| 1,567
|
py
|
Python
|
lib/asciidocpresenters/ingredient.py
|
joshua-stone/OpenRecipeBook-Ebook
|
bad9c10301ed9e9d37fbffb0709f3954c4ce540a
|
[
"MIT"
] | null | null | null |
lib/asciidocpresenters/ingredient.py
|
joshua-stone/OpenRecipeBook-Ebook
|
bad9c10301ed9e9d37fbffb0709f3954c4ce540a
|
[
"MIT"
] | 1
|
2021-09-07T02:12:36.000Z
|
2021-09-07T02:12:36.000Z
|
lib/asciidocpresenters/ingredient.py
|
joshua-stone/OpenRecipeBook-Ebook
|
bad9c10301ed9e9d37fbffb0709f3954c4ce540a
|
[
"MIT"
] | 1
|
2020-04-28T05:15:55.000Z
|
2020-04-28T05:15:55.000Z
|
from helpers import generate_link, generate_temperature, ref_encode
class IngredientAsciidocPresenter(object):
def __init__(self, equipment):
self._data = equipment.data
@property
def data(self):
return self._data
@property
def config_id(self):
rendered = f"[[{ref_encode(self.data['config_id'])}]]"
return rendered
@property
def name(self):
rendered = f"=== {self.data['name']}"
return rendered
@property
def summary(self):
if self.data['summary']:
rendered = f"\nSummary: {self.data['summary']}\n"
else:
rendered = ''
return rendered
@property
def products(self):
table = []
for item in self.data['products']:
ingredient_name = item['name']
if 'stores' in item.keys():
stores = []
for store in item['stores']:
link = generate_link(store)
stores.append(link)
table.append((ingredient_name, stores))
flattened = []
for product, links in table:
flattened.append('| ' + product)
flattened.append('| ' + ' +\n '.join(links))
rendered = '\n'.join(['|===\n| Product | Where to buy', *flattened, '|==='])
return rendered
def render(self):
ingredient_parts = [
self.config_id,
self.name,
self.summary,
self.products,
]
return '\n'.join(ingredient_parts) + '\n'
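# render() yields the asciidoc block for one ingredient: the [[anchor]], the "=== name"
# heading, the optional summary and the product table.
# Usage sketch (hypothetical `ingredient` object):
# print(IngredientAsciidocPresenter(ingredient).render())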
| 24.484375
| 84
| 0.52776
|
ad12980ba2341df2e21da58a4426910ceb584154
| 10,579
|
py
|
Python
|
saas/management/commands/ledger.py
|
naqibhakimi/djaodjin-saas
|
c5b9337b21782f62ef1a5e1bbe9c6421a2dcd2df
|
[
"BSD-2-Clause"
] | null | null | null |
saas/management/commands/ledger.py
|
naqibhakimi/djaodjin-saas
|
c5b9337b21782f62ef1a5e1bbe9c6421a2dcd2df
|
[
"BSD-2-Clause"
] | null | null | null |
saas/management/commands/ledger.py
|
naqibhakimi/djaodjin-saas
|
c5b9337b21782f62ef1a5e1bbe9c6421a2dcd2df
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime, logging, re, sys
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils.timezone import utc
from ...ledger import export
from ...models import Transaction
from ...utils import get_organization_model
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Import/export transactions in ledger format."
requires_model_validation = False
def add_arguments(self, parser):
parser.add_argument(
"--database",
action="store",
dest="database",
default="default",
help="connect to database specified.",
)
parser.add_argument(
"--broker",
action="store",
dest="broker",
default="default",
help="broker for the site",
)
parser.add_argument(
"--create-organizations",
action="store_true",
dest="create_organizations",
default=False,
help="Create organization if it does not exist.",
)
parser.add_argument(
"subcommand",
metavar="subcommand",
nargs="+",
help="subcommand: export|import",
)
def handle(self, *args, **options):
# pylint: disable=too-many-locals
subcommand = options["subcommand"][0]
filenames = options["subcommand"][1:]
using = options["database"]
if subcommand == "export":
export(
self.stdout,
Transaction.objects.using(using).all().order_by("created_at"),
)
elif subcommand == "import":
broker = options.get("broker", None)
create_organizations = options.get("create_organizations", False)
for arg in filenames:
if arg == "-":
import_transactions(
sys.stdin, create_organizations, broker, using=using
)
else:
with open(arg) as filedesc:
import_transactions(
filedesc, create_organizations, broker, using=using
)
else:
self.stderr.write("error: unknown command: '%s'" % subcommand)
def import_transactions(
filedesc, create_organizations=False, broker=None, using="default"
):
# pylint:disable=too-many-locals
with transaction.atomic():
line = filedesc.readline()
while line != "":
look = re.match(
r"(?P<created_at>\d\d\d\d/\d\d/\d\d( \d\d:\d\d:\d\d)?)"
r"\s+(#(?P<reference>\S+) -)?(?P<descr>.*)",
line,
)
if look:
# Start of a transaction
try:
created_at = datetime.datetime.strptime(
look.group("created_at"), "%Y/%m/%d %H:%M:%S"
).replace(tzinfo=utc)
except ValueError:
created_at = datetime.datetime.strptime(
look.group("created_at"), "%Y/%m/%d"
).replace(tzinfo=utc)
if look.group("reference"):
reference = look.group("reference").strip()
else:
reference = None
descr = look.group("descr").strip()
line = filedesc.readline()
dest_organization, dest_account, dest_amount, dest_unit = parse_line(
line, create_organizations, broker=broker, using=using
)
line = filedesc.readline()
orig_organization, orig_account, orig_amount, orig_unit = parse_line(
line, create_organizations, broker=broker, using=using
)
if dest_amount < 0:
# Opening balances are shown as negative amounts
tmp_organization = dest_organization
tmp_account = dest_account
tmp_amount = dest_amount
tmp_unit = dest_unit
dest_organization = orig_organization
dest_account = orig_account
dest_amount = orig_amount
dest_unit = orig_unit
orig_organization = tmp_organization
orig_account = tmp_account
orig_amount = tmp_amount
orig_unit = tmp_unit
if dest_unit != "usd" and orig_unit == "usd":
dest_amount = -orig_amount
dest_unit = orig_unit
if not orig_amount:
orig_amount = dest_amount
if not orig_unit:
orig_unit = dest_unit
if dest_organization and orig_organization:
# Assuming no errors, at this point we have
# a full transaction.
# pylint:disable=logging-not-lazy
LOGGER.debug(
"Transaction.objects.using('%(using)s').create("
"created_at='%(created_at)s',"
"descr='%(descr)s',"
"dest_unit='%(dest_unit)s',"
"dest_amount='%(dest_amount)s',"
"dest_organization='%(dest_organization)s',"
"dest_account='%(dest_account)s',"
"orig_amount='%(orig_amount)s',"
"orig_unit='%(orig_unit)s',"
"orig_organization='%(orig_organization)s',"
"orig_account='%(orig_account)s',"
"event_id=%(event_id)s)"
% {
"using": using,
"created_at": created_at,
"descr": descr,
"dest_unit": dest_unit,
"dest_amount": dest_amount,
"dest_organization": dest_organization,
"dest_account": dest_account,
"orig_amount": dest_amount,
"orig_unit": orig_unit,
"orig_organization": orig_organization,
"orig_account": orig_account,
"event_id": "'%s'" % reference if reference else "None",
}
)
Transaction.objects.using(using).create(
created_at=created_at,
descr=descr,
dest_unit=dest_unit,
dest_amount=dest_amount,
dest_organization=dest_organization,
dest_account=dest_account,
orig_amount=dest_amount,
orig_unit=orig_unit,
orig_organization=orig_organization,
orig_account=orig_account,
event_id=reference,
)
else:
line = line.strip()
if line:
sys.stderr.write("warning: skip line '%s'\n" % line)
line = filedesc.readline()
MONEY_PAT = r"(?P<prefix>\$?)(?P<value>-?((\d|,)+(\.\d+)?))\s*(?P<suffix>(\w+)?)"
def parse_line(line, create_organizations=False, broker=None, using="default"):
"""
Parse an (organization, account, amount) triplet.
"""
unit = None
amount = 0
look = re.match(r"\s+(?P<tags>\w(\w|:)+)(\s+(?P<amount>.+))?", line)
if look:
organization_slug = broker
account_parts = []
for tag in look.group("tags").split(":"):
if tag[0].islower():
organization_slug = tag
else:
account_parts += [tag]
account = ":".join(account_parts)
if look.group("amount"):
look = re.match(MONEY_PAT, look.group("amount"))
if look:
unit = look.group("prefix")
                if not unit:
unit = look.group("suffix")
elif unit == "$":
unit = "usd"
value = look.group("value").replace(",", "")
if "." in value:
amount = int(float(value) * 100)
else:
amount = int(value)
organization_model = get_organization_model()
try:
if create_organizations:
organization, _ = organization_model.objects.using(using).get_or_create(
slug=organization_slug
)
else:
organization = organization_model.objects.using(using).get(
slug=organization_slug
)
return (organization, account, amount, unit)
except organization_model.DoesNotExist:
sys.stderr.write(
"error: Cannot find Organization '%s'\n" % organization_slug
)
return (None, None, amount, unit)
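# Illustrative notes (not part of the original command). The import parser
# above accepts entries shaped like the following; the dates, slugs and
# account names are made up, only the layout matters:
#
#   2021/01/01 00:00:00 #cha_123 - subscription charge
#       xia:Funds                                    $29.00
#       cowork:Funds
#
# Dollar amounts are stored in cents (int(float(value) * 100)), so "$29.00"
# becomes amount=2900 with unit='usd'. Hypothetical invocations of the
# management command defined above:
#
#   python manage.py ledger export > ledger.txt
#   python manage.py ledger import --create-organizations ledger.txt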
| 41.324219
| 88
| 0.519992
|
bcc12a8b8b9734a9eae39c0940bd788b4c4cd817
| 2,854
|
py
|
Python
|
tests_python/examples/proto_demo_noops.py
|
Piotr170687/tezos
|
c7e84dfae2837096a4188e835fb780ad514ec2d3
|
[
"MIT"
] | 1
|
2021-11-03T08:21:08.000Z
|
2021-11-03T08:21:08.000Z
|
tests_python/examples/proto_demo_noops.py
|
Piotr170687/tezos
|
c7e84dfae2837096a4188e835fb780ad514ec2d3
|
[
"MIT"
] | null | null | null |
tests_python/examples/proto_demo_noops.py
|
Piotr170687/tezos
|
c7e84dfae2837096a4188e835fb780ad514ec2d3
|
[
"MIT"
] | 4
|
2021-04-27T15:00:34.000Z
|
2021-09-26T21:50:01.000Z
|
import time
from tools import constants, paths
from launchers.sandbox import Sandbox
PROTO_DEMO = 'ProtoDemoNoopsDemoNoopsDemoNoopsDemoNoopsDemo6XBoYp'
PROTO_GENESIS = 'ProtoGenesisGenesisGenesisGenesisGenesisGenesk612im'
PARAMS = ['-p', PROTO_GENESIS]
def forge_block_header_data(protocol_data):
"""
Returns a binary encoding for a dict of the form
`{'block_header_data: string}`, as expected by the protocol.
This corresponds to the encoding given by
`data_encoding.(obj1 (req "block_header_data" string))`. See
`lib_data_encoding/data_encoding.mli` for the spec.
"""
assert len(protocol_data) == 1 and 'block_header_data' in protocol_data
string = protocol_data['block_header_data']
tag = '0000'
padded_hex_len = f'{len(string):#06x}'[2:]
return tag + padded_hex_len + bytes(string, 'utf-8').hex()
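# Worked example (added for illustration): for the value used in main() below,
# {'block_header_data': 'hello world'} encodes to
#   '0000' + '000b' + '68656c6c6f20776f726c64'
# i.e. what reads as a 4-byte big-endian length prefix (11 == 0x0b) followed
# by the UTF-8 bytes of the string -- assuming, per the docstring above, that
# the string is shorter than 65536 bytes so the leading '0000' tag doubles as
# the high half of the length.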
def main():
with Sandbox(paths.TEZOS_HOME,
constants.IDENTITIES,
log_dir='tmp') as sandbox:
# launch a sandbox node
sandbox.add_node(0, params=constants.NODE_PARAMS)
client = sandbox.client(0)
protocols = client.list_protocols()
assert PROTO_DEMO in protocols
parameters = {} # type: dict
client.activate_protocol_json(PROTO_DEMO, parameters, key='activator',
fitness='1')
head = client.rpc('get', '/chains/main/blocks/head/', params=PARAMS)
# current protocol is still genesis and level == 1
assert head['header']['level'] == 1
assert head['protocol'] == PROTO_GENESIS
time.sleep(1)
        # bake a block for the new protocol, using the following RPCs:
# - helpers/preapply/block builds the block
# - helpers/forge_block_header encodes the whole block header
# - /injection/block injects it
message = "hello world"
data = {"protocol_data":
{"protocol": PROTO_DEMO, "block_header_data": message},
"operations": []}
block = client.rpc(
'post',
'/chains/main/blocks/head/helpers/preapply/block',
data=data,
params=PARAMS)
protocol_data = {'block_header_data': message}
encoded = forge_block_header_data(protocol_data)
shell_header = block['shell_header']
shell_header['protocol_data'] = encoded
encoded = client.rpc(
'post',
'/chains/main/blocks/head/helpers/forge_block_header',
data=shell_header,
params=PARAMS)
inject = {'data': encoded['block'], 'operations': []}
client.rpc('post', '/injection/block', data=inject, params=PARAMS)
head = client.rpc('get', '/chains/main/blocks/head/', params=PARAMS)
assert head['header']['level'] == 2
if __name__ == "__main__":
main()
| 33.97619
| 78
| 0.628591
|
e7d23f920f4d312fabc0f66c213f518269907222
| 11,141
|
py
|
Python
|
tools/convert_bdd_to_coco.py
|
keeploading/Detectron
|
bc95825306fcda093b2f652e1ab63ce9b42368ff
|
[
"Apache-2.0"
] | null | null | null |
tools/convert_bdd_to_coco.py
|
keeploading/Detectron
|
bc95825306fcda093b2f652e1ab63ce9b42368ff
|
[
"Apache-2.0"
] | null | null | null |
tools/convert_bdd_to_coco.py
|
keeploading/Detectron
|
bc95825306fcda093b2f652e1ab63ce9b42368ff
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
from PIL import Image
import numpy as np
from scipy.misc import comb
import cv2
from pycocotools import mask
def bernstein_poly(i, n, t):
"""
The Bernstein polynomial of n, i as a function of t
"""
return comb(n, i) * ( t**(n-i) ) * (1 - t)**i
def bezier_curve(points, nTimes=1000):
"""
Given a set of control points, return the
bezier curve defined by the control points.
points should be a list of lists, or list of tuples
such as [ [1,1],
[2,3],
[4,5], ..[Xn, Yn] ]
nTimes is the number of time steps, defaults to 1000
See http://processingjs.nihongoresources.com/bezierinfo/
"""
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
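# Minimal usage sketch (not part of the original script): sample a quadratic
# Bezier curve from three made-up control points.
def _bezier_demo():
    control_points = [[0, 0], [5, 10], [10, 0]]
    # 50 (x, y) samples along the curve, returned as two numpy arrays
    xvals, yvals = bezier_curve(control_points, nTimes=50)
    return list(zip(xvals, yvals))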
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
'--dataset', help="cocostuff, cityscapes", default="cityscapes_instance_only", type=str)
parser.add_argument(
'--outdir', help="output dir for json files", default="output", type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted", default="input", type=str)
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
return parser.parse_args()
# for Cityscapes
def getLabelID(self, instID):
if (instID < 1000):
return instID
else:
return int(instID / 1000)
def getBezierPoint(polyPoint):
xvals, yvals = bezier_curve(polyPoint, nTimes=5*len(polyPoint))
point = []
for x, y in zip(xvals, yvals):
point.append([x, y])
return point[::-1]
def getPointByPoly2d(poly2d):
ann = []
curve = []
for p in poly2d:
if p[2] == "C":
curve.append([p[0], p[1]])
else:
if len(curve) > 0:
ann.extend(getBezierPoint(curve))
curve = []
ann.append([p[0], p[1]])
    if poly2d[-1] != poly2d[0]:
        # polygon is not closed, skip it
        return []
# np_ann = np.array(ann)
# np_ann[:, 0] -= 5
# np_ann = np_ann.tolist()
# repair = np.array(ann[::-1])
# repair[:, 0] += 5
# repair = repair.tolist()
# np_ann.extend(repair)
# ann.extend(np_ann)
return ann
def getBoxByObj(obj):
    if "box2d" in obj:
box2d = obj["box2d"]
return [box2d["x1"], box2d["y1"],
box2d["x2"] - box2d["x1"],
box2d["y2"] - box2d["y1"]]
else:
return []
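# Worked example (illustrative): a label containing
#   {"box2d": {"x1": 10, "y1": 20, "x2": 110, "y2": 220}}
# is converted by getBoxByObj() above into the COCO-style
# [x, y, width, height] box [10, 20, 100, 200].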
def getPointByObj(obj):
ann = []
box2d = []
    if "box2d" in obj:
ann.append([[obj["box2d"]["x1"], obj["box2d"]["y1"]], [obj["box2d"]["x2"], obj["box2d"]["y2"]]])
return ann
    elif "poly2d" in obj:
area = getPointByPoly2d(obj["poly2d"])
if len(area) > 0:
ann.append(area)
return ann
    elif "segments2d" in obj:
for poly in obj["segments2d"]:
ann.append(getPointByPoly2d(poly))
return ann
def getAreaByObj(polygon_points_array, h, w, category_id):
line_type = 1 # cv2.CV_AA
color = category_id
sum = 0
for poly_points in polygon_points_array:
points = poly_points
seg = []
for j in range(len(points)):
coordx = points[j][0]
coordy = points[j][1]
point = []
point.append(int(coordx))
point.append(int(coordy))
seg.append(point)
labelMask = np.zeros((h, w))
cv2.fillPoly(labelMask, np.array([seg], dtype=np.int32), color, line_type)
mask_new, contours, hierarchy = cv2.findContours((labelMask).astype(np.uint8), cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
##----------------------------------------------
polygons = []
# In practice, only one element.
for contour in contours:
contour = contour.flatten().tolist()
polygons.append(contour)
labelMask[:, :] = labelMask == color
labelMask = np.expand_dims(labelMask, axis=2)
labelMask = labelMask.astype('uint8')
labelMask = np.asfortranarray(labelMask)
Rs = mask.encode(labelMask)
sum += float(mask.area(Rs))
print ("sum:" + str(sum))
return sum, polygons
def convert_cityscapes_instance_only(
data_dir, out_dir):
"""Convert from cityscapes format to COCO instance seg format - polygons"""
sets = [
'train',
'val'
# 'images/100k/train',
# 'images/100k/val'
# 'gtFine_train',
# 'gtCoarse_train',
# 'gtCoarse_val',
# 'gtCoarse_train_extra'
]
ann_dirs = [
'annotation_train',
'annotation_val'
# 'labels/100k/train',
# 'labels/100k/val'
# 'gtFine_trainvaltest/gtFine/train',
# 'gtFine_trainvaltest/gtFine/test',
# 'gtCoarse/train',
# 'gtCoarse/train_extra',
# 'gtCoarse/val'
]
json_name = 'instancesonly_filtered_%s.json'
ends_in = '.json'
img_id = 0
ann_id = 0
cat_id = 1
category_dict = {}
category_instancesonly = ['__background__',
"bike",
"bus",
"car",
# "motor",
"person",
"rider",
"traffic light",
"traffic sign",
# "train",
"truck",
"area/alternative",
"area/drivable",
# "lane/crosswalk",
# "lane/double other",
# "lane/double white",
# "lane/double yellow",
# "lane/road curb",
# "lane/single other",
# "lane/single white",
# "lane/single yellow"
]#--------------------------------------------------------------------------------------
# Write "info"
infodata = {'info': {'description': 'This is stable 1.0 version of the 2014 MS COCO dataset.', 'url': u'http://mscoco.org', 'version': u'1.0', 'year': 2014, 'contributor': 'Microsoft COCO group', 'date_created': '2015-01-27 09:11:52.357475'}}
for data_set, ann_dir in zip(sets, ann_dirs):
print('Starting %s' % data_set)
ann_dict = {}
ann_dict["info"] = infodata["info"]
ann_dict["type"] = 'instances'
annPath = os.path.join(data_dir, 'coco_ref',
'instances_' + data_set + '2014.json')
with open(annPath) as annFile:
print ("open " + str(annFile))
cocodata = json.load(annFile)
licdata = [i for i in cocodata['licenses']]
ann_dict["licenses"] = licdata
images = []
annotations = []
ann_dir = os.path.join(data_dir, ann_dir)
for root, _, files in os.walk(ann_dir):
for filename in files:
if filename.endswith(ends_in):
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
json_ann = json.load(open(os.path.join(root, filename)))
image = {}
image['id'] = img_id
img_id += 1
# im = Image.open(filename)
# (width, height) = im.size
image['width'] = 1280
image['height'] = 720
outmask = np.zeros((image['height'], image['width']), np.uint8)
img_dir = os.path.join(data_dir, data_set)
# image['file_name'] = img_dir + "/" + filename.split('.')[0] + ".jpg"
image['file_name'] = filename.split('.')[0] + ".jpg"
images.append(image)
# fullname = os.path.join(root, image['seg_file_name'])
# objects = cs.instances2dict_with_polygons(
# [fullname], verbose=False)[fullname]
objects = json_ann["frames"][0]["objects"]
for obj in objects:
if obj["category"] not in category_instancesonly:
continue # skip non-instance categories
index = category_instancesonly.index(obj["category"])# + 184
seg_points = getPointByObj(obj)#[[[point1],[point2]]]
seg = []
for seg_poit in seg_points:
seg.extend(sum(seg_poit, []))
if len(seg) == 0:
print('Warning: invalid segmentation.')
continue
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
category_dict[obj["category"]] = index
ann['category_id'] = index
ann['iscrowd'] = 0
                        if "box2d" in obj:
ann['bbox'] = getBoxByObj(obj)
else:
ann['area'], ann['segmentation'] = getAreaByObj(seg_points, image['height'], image['width'], ann['category_id'])
ann['bbox'] = bboxs_util.xyxy_to_xywh(segms_util.polys_to_boxes(
[ann['segmentation']])).tolist()[0]
annotations.append(ann)
# break
ann_dict['images'] = images
# category_dict.values()
# categories = [{"id": category_dict[name], "name": name} for name in category_dict]
categories = []
for index, value in enumerate(category_instancesonly):
categories.append({"id": index, "name": value})
categories = categories[1:]
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict))
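# Sketch of the emitted JSON skeleton (for orientation only; values are
# illustrative). Each instancesonly_filtered_<set>.json written above holds:
#   {"info": {...}, "type": "instances", "licenses": [...],
#    "images": [{"id": ..., "width": 1280, "height": 720, "file_name": ...}],
#    "annotations": [{"id": ..., "image_id": ..., "category_id": ...,
#                     "iscrowd": 0, "bbox": [x, y, w, h], ...}],
#    "categories": [{"id": ..., "name": ...}]}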
if __name__ == '__main__':
args = parse_args()
# args.datadir = "/media/administrator/deeplearning/dataset/bdd100k"
# args.outdir = "/media/administrator/deeplearning/project/detectron/detectron/datasets/data/bdd/annotations"
convert_cityscapes_instance_only(args.datadir, args.outdir)
| 33.863222
| 246
| 0.528498
|
9e0d8831afb1fe70af0ff97be00ba9d28c952481
| 11,785
|
py
|
Python
|
my_vim_files/python27/Lib/distutils/version.py
|
satsaeid/dotfiles
|
401c3213b31dd941b44e553c6f0441187b01c19a
|
[
"MIT"
] | null | null | null |
my_vim_files/python27/Lib/distutils/version.py
|
satsaeid/dotfiles
|
401c3213b31dd941b44e553c6f0441187b01c19a
|
[
"MIT"
] | null | null | null |
my_vim_files/python27/Lib/distutils/version.py
|
satsaeid/dotfiles
|
401c3213b31dd941b44e553c6f0441187b01c19a
|
[
"MIT"
] | null | null | null |
#
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id: version.py 70642 2009-03-28 00:48:48Z georg.brandl $
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
re.VERBOSE)
def parse (self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError, "invalid version number '%s'" % vstring
(major, minor, patch, prerelease, prerelease_num) = \
match.group(1, 2, 4, 5, 6)
if patch:
self.version = tuple(map(string.atoi, [major, minor, patch]))
else:
self.version = tuple(map(string.atoi, [major, minor]) + [0])
if prerelease:
self.prerelease = (prerelease[0], string.atoi(prerelease_num))
else:
self.prerelease = None
def __str__ (self):
if self.version[2] == 0:
vstring = string.join(map(str, self.version[0:2]), '.')
else:
vstring = string.join(map(str, self.version), '.')
if self.prerelease:
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
return vstring
def __cmp__ (self, other):
if isinstance(other, StringType):
other = StrictVersion(other)
compare = cmp(self.version, other.version)
if (compare == 0): # have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
return cmp(self.prerelease, other.prerelease)
else: # numeric versions don't match --
return compare # prerelease stuff doesn't matter
# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def parse (self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = filter(lambda x: x and x != '.',
self.component_re.split(vstring))
for i in range(len(components)):
try:
components[i] = int(components[i])
except ValueError:
pass
self.version = components
def __str__ (self):
return self.vstring
def __repr__ (self):
return "LooseVersion ('%s')" % str(self)
def __cmp__ (self, other):
if isinstance(other, StringType):
other = LooseVersion(other)
return cmp(self.version, other.version)
# end class LooseVersion
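# Illustrative doctest-style sketch (Python 2, matching the module above; not
# part of the original file). StrictVersion orders a pre-release before the
# corresponding final release, while LooseVersion's plain tuple comparison
# does not:
#   >>> StrictVersion('1.5.2a2') < StrictVersion('1.5.2')
#   True
#   >>> LooseVersion('1.5.2a2') < LooseVersion('1.5.2')
#   False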
| 39.283333
| 80
| 0.644039
|
2c641ff5cc60634519fbfbbe5065aad9fd04494c
| 11,657
|
py
|
Python
|
Redwood.py
|
RK900/Tree-Building
|
80abbafd6e702772396f1cd1cccab0c7e68f5628
|
[
"MIT"
] | 2
|
2016-07-26T04:34:37.000Z
|
2017-07-03T02:01:23.000Z
|
Redwood.py
|
RK900/Tree-Building
|
80abbafd6e702772396f1cd1cccab0c7e68f5628
|
[
"MIT"
] | 1
|
2016-11-06T17:34:15.000Z
|
2017-02-13T04:40:04.000Z
|
Redwood.py
|
RK900/Tree-Building
|
80abbafd6e702772396f1cd1cccab0c7e68f5628
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 11:16:39 2015
@author: Rohan Koodli
"""
class Tree(object):#Version 2
"""
Tree Node
"""
def __init__(self, name='Aves', children=None):
self.name = name
self.children = []
if children is not None:
for child in children:
self.add_child(child)
def __repr__(self, level=0): #Makes printable representation of Tree
ret = "\t"*level+repr(self.name)+"\n"
for child in self.children:
ret += child.__repr__(level+1)
return ret
def add_child(self, node):
"""
Adds node
:param node: node to be added
:return: void
"""
self.children.append(node)
def disp_tree(self):
"""
Duplicate method of printing tree
:return: Printable representation of a tree
"""
if self is not None:
print (self.name)
for child in self.children:
#print '---'
if child is not None:
print ('\t', child.disp_tree())
    def disp_tree_v2(self, level=0):
        """
        Prints the tree with one tab of indentation per level
        """
        if self is not None:
            print ('\t'*level + self.name)
            for child in self.children:
                if child is not None:
                    child.disp_tree_v2(level+1)
def iterate(self):
for i in self.name:
print (i)
'''
#Tree("Root", [Tree("1")])
Root_node = Tree("This is the Root")
node1 = Tree("1")
node2 = Tree("2")
node21= Tree("2.1")
node3 = Tree("3")
Root_node.add_child(node1)
node1.add_child(node2)
node1.add_child(node21)
node2.add_child(node3)
#Root_node.disp_tree()
#print "The End"
'''
def treefromFile(source):
"""
Reads tree from a TXT file
:param source: path
:return: void
"""
source.replace(',','')
source = source.split() #Puts words in list
root = Tree('Aves')
temp = root
for i in range(len(source)):
temp2 = Tree(source[i])
temp.add_child(temp2)
temp = temp2
return root
def merge_trees(tree1,tree2):
"""
Merge 2 trees together
:param tree1: Tree 1
:param tree2: Tree 2
:return: void
"""
if (tree1 is not None) and (tree2 is not None):
mergedTree = Tree('Aves') #root node
mergedRoot = mergedTree
flag = True
iter1 = tree1.children
iter2 = tree2.children
#print 'Start with appending tree1 as is..'
append_trees(mergedTree, iter1)
child_iter = iter1
while iter1 and iter2:
i = 0
children_name_list = []
for k in range(len(child_iter)):
#print 'k =',k
# Make the list of all the names of child_iter
children_name_list.append(child_iter[k].name)
#print 'List of children:', children_name_list
if iter2[0].name in children_name_list:
i = children_name_list.index(iter2[0].name)
#print' Found a match:', child_iter[i].name, iter2[0].name
#print "So far so good."
mergedTree = child_iter[i]
child_iter = child_iter[i].children #moving iter to next node
iter2 = iter2[0].children
#print 'resetting: Iter2[0]',iter2[0].name
else:
#print 'child_iter', child_iter
flag = False
#print 'break out of while loop'
break
iter1 = child_iter
if flag == False:
#print 'i, child_iter:',i,child_iter, 'Iter2',iter2
temp2 = Tree(iter2[0].name)
#print 'Adding child to mergedTree:', iter2[0].name
mergedTree.add_child(temp2)
append_trees(temp2,iter2[0].children)
#print 'mergedTree.name: ',mergedTree.name
return mergedRoot
def append_trees(parent_tree,tree_children):
"""
Adds 2 trees together
:param parent_tree: Parent tree
:param tree_children: Child tree
:return: void
"""
if tree_children:
for j in range(len(tree_children)):
#print 'j,tree_children[]: ',j, tree_children[j].name
parent_tree.add_child(tree_children[j])
parent_tree = parent_tree.children[0]
tree_children = tree_children[0].children
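# Minimal usage sketch (added for illustration; the taxon names below are
# made up): build two chains with treefromFile() and combine them with
# merge_trees(), then rely on Tree.__repr__ for an indented printout.
def _merge_demo():
    chain1 = treefromFile("Neognathae Passeriformes Corvidae")
    chain2 = treefromFile("Neognathae Passeriformes Paridae")
    merged = merge_trees(chain1, chain2)
    print (merged)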
#First version of append_trees
def compare_node(parent_node,child_node): #parent and child are Tree objects
"""
Compares 2 nodes
:param parent_node: Node 1
:param child_node: Node 2
:return: Difference
"""
differences = []
parent = parent_node.name
child_node = parent_node.children[0]
child = child_node.name
if len(parent) > len(child):
for i in range(len(child)):
if parent[i] != child[i]:
differences.append(i)
for k in range(i+1,len(parent)):
differences.append(k)
else:
for i in range(len(parent)):
if parent[i] != child[i]:
differences.append(i)
for k in range(i+1,len(child)):
differences.append(k)
print (len(differences))
def compare_strains(s1,s2):#both type bio.seq.seq
differences = []
if len(s1) < len(s2):
pass
#not finished yet
'''
def append_trees_v0(parent_tree ,tree_children):
#parent_tree is a Tree and tree_children is a list
tempvar1 = parent_tree
tempvar2 = Tree(tree_children[0].name)
#print type(tempvar1)
while tree_children:
#print type(tempvar1)
#parent_tree.disp_tree()
print 'separated node tree:'
parent_tree.disp_tree()
print '------END----------'
parent_tree.add_child(tree_children[0])
print 'parent_tree: ',parent_tree
print 'inside append_trees: ',tree_children[0].name
parent_tree = parent_tree.children[0]
tree_children = tree_children[0].children
#tempvar2 = tempvar2.children
def append_trees_v0(parent_tree,tree_children):
#parent_tree is a Tree and tree_children is a list
#print type(tempvar1)
if tree_children:
#print type(tempvar1)
#parent_tree.disp_tree()
#print 'separated node tree:'
#parent_tree.disp_tree()
#print '------END----------'
parent_tree.add_child(tree_children[0])
#print 'parent_tree: ',parent_tree
print 'inside append_trees: ',tree_children[0].name
parent_tree = parent_tree.children[0]
tree_children = tree_children[0].children
#tempvar2 = tempvar2.children
def merge_trees_alpha(tree1,tree2):
if (tree1 is not None) and (tree2 is not None):
mergedTree = Tree('Aves_Merged') #root node
mergedRoot = mergedTree
flag = True
iter1 = tree1.children
iter2 = tree2.children
# go thru each level of the tree
while iter1 and iter2:
if iter1[0].name == iter2[0].name:
#print "So far so good."
#print 'Iter1[0]',iter1[0].name, 'Iter2[0]',iter2[0].name
temp = Tree(iter1[0].name)
mergedTree.add_child(temp)
iter1 = iter1[0].children #moving iter to next node
iter2 = iter2[0].children
mergedTree = temp
#print 'resetting: Iter1[0]',iter1[0].name, 'resetting: Iter2[0]',iter2[0].name
continue
else:
flag = False
temp1 = Tree(iter1[0].name)
temp2 = Tree(iter2[0].name)
mergedTree.add_child(temp1)
mergedTree.add_child(temp2)
#print 'mergedTree.name: ',mergedTree.name
#print 'temp1: ',temp1
break
if flag == False:
#print "Trees differ"
#print 'now appending temp1'
append_trees(temp1,iter1[0].children)
#print 'tree 1: '
#temp1.disp_tree()
#print 'now appending temp2'
append_trees(temp2,iter2[0].children)
#print 'printting tree2'
#temp2.disp_tree()
#print 'end'
else:
print 'Trees are same'
return mergedRoot
def merge_trees_beta(tree1,tree2):
print '[[[[[[[Inside v2]]]]]]]]]'
#qwerty = input('Enter a key ')
if (tree1 is not None) and (tree2 is not None):
mergedTree = Tree('Aves_Merged') #root node
mergedRoot = mergedTree
flag = True
iter1 = tree1.children
iter2 = tree2.children
#mergedTree = mergedTree.children
#print (iter1)
#print (iter2)
# go thru each level of the tree
child_iter = iter1
while iter1 and iter2:
#qwerty = raw_input('Enter a key a ')
i = 0
print 'iter1:', iter1, 'iter2:', iter2
#qwerty = raw_input('Enter a key b ')
#if child_iter[i].name == iter2[0].name:
print 'No.of children is: ', len(child_iter)
#while i < len(child_iter):
children_name_list = []
#print 'type-of--children-list----------',type(children_name_list)
for k in range(len(child_iter)):
print 'k =',k
# Make the list of all the names of child_iter
children_name_list.append(child_iter[k].name)
print 'List of children:', children_name_list
#qwerty = raw_input('Enter a key d ')
if iter2[0].name in children_name_list:
print' Comparing:', child_iter[i].name, iter2[0].name
#if child_iter[i].name == iter2[0].name:
i = children_name_list.index(iter2[0].name)
print "So far so good."
print 'child_iter[%s]'%i,child_iter[i].name, 'Iter2[0]',iter2[0].name
temp = Tree(child_iter[i].name)
mergedTree.add_child(temp)
child_iter = child_iter[i].children #moving iter to next node
iter2 = iter2[0].children
mergedTree = temp
#print 'resetting: child_iter',child_iter[i].name
print 'resetting: Iter2[0]',iter2[0].name
#qwerty = raw_input('Enter a key e ')
else:
child_iter = iter1
print 'child_iter', child_iter
flag = False
print 'trying to break out of while loop'
#qwerty = input('Enter a key c ')
#qwerty = raw_input('Enter a key f ')
break
#i += 1
iter1 = child_iter
if flag == False:
print 'i, child_iter:',i,child_iter, 'Iter2',iter2
for k in range(len(child_iter)):
temp1 = Tree(child_iter[k].name)
mergedTree.add_child(temp1)
print 'Adding child to mergedTree:', child_iter[k].name
append_trees(temp1,child_iter[k].children)
temp2 = Tree(iter2[0].name)
print 'Adding child to mergedTree:', iter2[0].name
mergedTree.add_child(temp2)
append_trees(temp2,iter2[0].children)
print 'mergedTree.name: ',mergedTree.name
#print 'temp1: ',temp1
return mergedRoot
'''
| 32.83662
| 95
| 0.539504
|
09b812d7e92cfa8ac52b85806bbaed9770e462e8
| 46,701
|
py
|
Python
|
tests/jsonpickle_test.py
|
mscuthbert/jsonpickle
|
59bff149bfe6e99f3694e3fb7c58cdd023d8298e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/jsonpickle_test.py
|
mscuthbert/jsonpickle
|
59bff149bfe6e99f3694e3fb7c58cdd023d8298e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/jsonpickle_test.py
|
mscuthbert/jsonpickle
|
59bff149bfe6e99f3694e3fb7c58cdd023d8298e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009, 2011, 2013 David Aguilar
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import doctest
import os
import unittest
import collections
import jsonpickle
import jsonpickle.backend
import jsonpickle.handlers
from jsonpickle import tags, util
from jsonpickle.compat import unicode
from jsonpickle.compat import unichr
from jsonpickle.compat import PY32, PY3
from helper import SkippableTest
class Thing(object):
def __init__(self, name):
self.name = name
self.child = None
def __repr__(self):
return 'Thing("%s")' % self.name
class Capture(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class ThingWithProps(object):
def __init__(self, name='', dogs='reliable', monkies='tricksy'):
self.name = name
self._critters = (('dogs', dogs), ('monkies', monkies))
def _get_identity(self):
keys = [self.dogs, self.monkies, self.name]
return hash('-'.join([str(key) for key in keys]))
identity = property(_get_identity)
def _get_dogs(self):
return self._critters[0][1]
dogs = property(_get_dogs)
def _get_monkies(self):
return self._critters[1][1]
monkies = property(_get_monkies)
def __getstate__(self):
out = dict(
__identity__=self.identity,
nom=self.name,
dogs=self.dogs,
monkies=self.monkies,
)
return out
def __setstate__(self, state_dict):
self._critters = (('dogs', state_dict.get('dogs')),
('monkies', state_dict.get('monkies')))
self.name = state_dict.get('nom', '')
ident = state_dict.get('__identity__')
if ident != self.identity:
            raise ValueError('expanded object does not match original state!')
def __eq__(self, other):
return self.identity == other.identity
class PicklingTestCase(unittest.TestCase):
def setUp(self):
self.pickler = jsonpickle.pickler.Pickler()
self.unpickler = jsonpickle.unpickler.Unpickler()
def tearDown(self):
self.pickler.reset()
self.unpickler.reset()
def test_string(self):
self.assertEqual('a string', self.pickler.flatten('a string'))
self.assertEqual('a string', self.unpickler.restore('a string'))
def test_unicode(self):
self.assertEqual(unicode('a string'),
self.pickler.flatten('a string'))
self.assertEqual(unicode('a string'),
self.unpickler.restore('a string'))
def test_int(self):
self.assertEqual(3, self.pickler.flatten(3))
self.assertEqual(3, self.unpickler.restore(3))
def test_float(self):
self.assertEqual(3.5, self.pickler.flatten(3.5))
self.assertEqual(3.5, self.unpickler.restore(3.5))
def test_boolean(self):
self.assertTrue(self.pickler.flatten(True))
self.assertFalse(self.pickler.flatten(False))
self.assertTrue(self.unpickler.restore(True))
self.assertFalse(self.unpickler.restore(False))
def test_none(self):
self.assertTrue(self.pickler.flatten(None) is None)
self.assertTrue(self.unpickler.restore(None) is None)
def test_list(self):
# multiple types of values
listA = [1, 35.0, 'value']
self.assertEqual(listA, self.pickler.flatten(listA))
self.assertEqual(listA, self.unpickler.restore(listA))
# nested list
listB = [40, 40, listA, 6]
self.assertEqual(listB, self.pickler.flatten(listB))
self.assertEqual(listB, self.unpickler.restore(listB))
# 2D list
listC = [[1, 2], [3, 4]]
self.assertEqual(listC, self.pickler.flatten(listC))
self.assertEqual(listC, self.unpickler.restore(listC))
# empty list
listD = []
self.assertEqual(listD, self.pickler.flatten(listD))
self.assertEqual(listD, self.unpickler.restore(listD))
def test_set(self):
setlist = ['orange', 'apple', 'grape']
setA = set(setlist)
flattened = self.pickler.flatten(setA)
for s in setlist:
self.assertTrue(s in flattened[tags.SET])
setA_pickle = {tags.SET: setlist}
self.assertEqual(setA, self.unpickler.restore(setA_pickle))
def test_dict(self):
dictA = {'key1': 1.0, 'key2': 20, 'key3': 'thirty',
tags.JSON_KEY + '6': 6}
self.assertEqual(dictA, self.pickler.flatten(dictA))
self.assertEqual(dictA, self.unpickler.restore(dictA))
dictB = {}
self.assertEqual(dictB, self.pickler.flatten(dictB))
self.assertEqual(dictB, self.unpickler.restore(dictB))
def test_tuple(self):
# currently all collections are converted to lists
tupleA = (4, 16, 32)
tupleA_pickle = {tags.TUPLE: [4, 16, 32]}
self.assertEqual(tupleA_pickle, self.pickler.flatten(tupleA))
self.assertEqual(tupleA, self.unpickler.restore(tupleA_pickle))
tupleB = (4,)
tupleB_pickle = {tags.TUPLE: [4]}
self.assertEqual(tupleB_pickle, self.pickler.flatten(tupleB))
self.assertEqual(tupleB, self.unpickler.restore(tupleB_pickle))
def test_tuple_roundtrip(self):
data = (1, 2, 3)
newdata = jsonpickle.decode(jsonpickle.encode(data))
self.assertEqual(data, newdata)
def test_set_roundtrip(self):
data = set([1, 2, 3])
newdata = jsonpickle.decode(jsonpickle.encode(data))
self.assertEqual(data, newdata)
def test_list_roundtrip(self):
data = [1, 2, 3]
newdata = jsonpickle.decode(jsonpickle.encode(data))
self.assertEqual(data, newdata)
def test_class(self):
inst = Thing('test name')
inst.child = Thing('child name')
flattened = self.pickler.flatten(inst)
self.assertEqual('test name', flattened['name'])
child = flattened['child']
self.assertEqual('child name', child['name'])
inflated = self.unpickler.restore(flattened)
self.assertEqual('test name', inflated.name)
self.assertTrue(type(inflated) is Thing)
self.assertEqual('child name', inflated.child.name)
self.assertTrue(type(inflated.child) is Thing)
def test_classlist(self):
array = [Thing('one'), Thing('two'), 'a string']
flattened = self.pickler.flatten(array)
self.assertEqual('one', flattened[0]['name'])
self.assertEqual('two', flattened[1]['name'])
self.assertEqual('a string', flattened[2])
inflated = self.unpickler.restore(flattened)
self.assertEqual('one', inflated[0].name)
self.assertTrue(type(inflated[0]) is Thing)
self.assertEqual('two', inflated[1].name)
self.assertTrue(type(inflated[1]) is Thing)
self.assertEqual('a string', inflated[2])
def test_classdict(self):
dict = {'k1': Thing('one'), 'k2': Thing('two'), 'k3': 3}
flattened = self.pickler.flatten(dict)
self.assertEqual('one', flattened['k1']['name'])
self.assertEqual('two', flattened['k2']['name'])
self.assertEqual(3, flattened['k3'])
inflated = self.unpickler.restore(flattened)
self.assertEqual('one', inflated['k1'].name)
self.assertTrue(type(inflated['k1']) is Thing)
self.assertEqual('two', inflated['k2'].name)
self.assertTrue(type(inflated['k2']) is Thing)
self.assertEqual(3, inflated['k3'])
def test_recursive(self):
"""create a recursive structure and test that we can handle it
"""
parent = Thing('parent')
child = Thing('child')
child.sibling = Thing('sibling')
parent.self = parent
parent.child = child
parent.child.twin = child
parent.child.parent = parent
parent.child.sibling.parent = parent
cloned = jsonpickle.decode(jsonpickle.encode(parent))
self.assertEqual(parent.name,
cloned.name)
self.assertEqual(parent.child.name,
cloned.child.name)
self.assertEqual(parent.child.sibling.name,
cloned.child.sibling.name)
self.assertEqual(cloned,
cloned.child.parent)
self.assertEqual(cloned,
cloned.child.sibling.parent)
self.assertEqual(cloned,
cloned.child.twin.parent)
self.assertEqual(cloned.child,
cloned.child.twin)
def test_tuple_notunpicklable(self):
self.pickler.unpicklable = False
flattened = self.pickler.flatten(('one', 2, 3))
self.assertEqual(flattened, ['one', 2, 3])
def test_set_not_unpicklable(self):
self.pickler.unpicklable = False
flattened = self.pickler.flatten(set(['one', 2, 3]))
self.assertTrue('one' in flattened)
self.assertTrue(2 in flattened)
self.assertTrue(3 in flattened)
self.assertTrue(isinstance(flattened, list))
def test_thing_with_module(self):
obj = Thing('with-module')
obj.themodule = os
flattened = self.pickler.flatten(obj)
inflated = self.unpickler.restore(flattened)
self.assertEqual(inflated.themodule, os)
def test_thing_with_module_safe(self):
obj = Thing('with-module')
obj.themodule = os
flattened = self.pickler.flatten(obj)
self.unpickler.safe = True
inflated = self.unpickler.restore(flattened)
self.assertEqual(inflated.themodule, None)
def test_thing_with_submodule(self):
from distutils import sysconfig
obj = Thing('with-submodule')
obj.submodule = sysconfig
flattened = self.pickler.flatten(obj)
inflated = self.unpickler.restore(flattened)
self.assertEqual(inflated.submodule, sysconfig)
def test_type_reference(self):
"""This test ensures that users can store references to types.
"""
obj = Thing('object-with-type-reference')
# reference the built-in 'object' type
obj.typeref = object
flattened = self.pickler.flatten(obj)
self.assertEqual(flattened['typeref'],
{tags.TYPE: '__builtin__.object'})
inflated = self.unpickler.restore(flattened)
self.assertEqual(inflated.typeref, object)
def test_class_reference(self):
"""This test ensures that users can store references to classes.
"""
obj = Thing('object-with-class-reference')
# reference the 'Thing' class (not an instance of the class)
obj.classref = Thing
flattened = self.pickler.flatten(obj)
self.assertEqual(flattened['classref'],
{tags.TYPE: 'jsonpickle_test.Thing'})
inflated = self.unpickler.restore(flattened)
self.assertEqual(inflated.classref, Thing)
def test_supports_getstate_setstate(self):
obj = ThingWithProps('object-which-defines-getstate-setstate')
flattened = self.pickler.flatten(obj)
self.assertTrue(flattened[tags.STATE].get('__identity__'))
self.assertTrue(flattened[tags.STATE].get('nom'))
inflated = self.unpickler.restore(flattened)
self.assertEqual(obj, inflated)
def test_references(self):
obj_a = Thing('foo')
obj_b = Thing('bar')
coll = [obj_a, obj_b, obj_b]
flattened = self.pickler.flatten(coll)
inflated = self.unpickler.restore(flattened)
self.assertEqual(len(inflated), len(coll))
for x in range(len(coll)):
self.assertEqual(repr(coll[x]), repr(inflated[x]))
def test_references_in_number_keyed_dict(self):
"""
Make sure a dictionary with numbers as keys and objects as values
can make the round trip.
Because JSON must coerce integers to strings in dict keys, the sort
order may have a tendency to change between pickling and unpickling,
and this could affect the object references.
"""
one = Thing('one')
two = Thing('two')
twelve = Thing('twelve')
two.child = twelve
obj = {
1: one,
2: two,
12: twelve,
}
self.assertNotEqual(list(sorted(obj.keys())),
list(map(int, sorted(map(str, obj.keys())))))
flattened = self.pickler.flatten(obj)
inflated = self.unpickler.restore(flattened)
self.assertEqual(len(inflated), 3)
self.assertEqual(inflated['12'].name, 'twelve')
def test_builtin_error(self):
expect = AssertionError
json = jsonpickle.encode(expect)
actual = jsonpickle.decode(json)
self.assertEqual(expect, actual)
self.assertTrue(expect is actual)
class JSONPickleTestCase(SkippableTest):
def setUp(self):
self.obj = Thing('A name')
self.expected_json = (
'{"%s": "jsonpickle_test.Thing", "name": "A name", "child": null}'
% tags.OBJECT
)
def test_encode(self):
expect = self.obj
pickle = jsonpickle.encode(self.obj)
actual = jsonpickle.decode(pickle)
self.assertEqual(expect.name, actual.name)
self.assertEqual(expect.child, actual.child)
def test_encode_notunpicklable(self):
expect = {'name': 'A name', 'child': None}
pickle = jsonpickle.encode(self.obj, unpicklable=False)
actual = jsonpickle.decode(pickle)
self.assertEqual(expect['name'], actual['name'])
def test_decode(self):
actual = jsonpickle.decode(self.expected_json)
self.assertEqual(self.obj.name, actual.name)
self.assertEqual(type(self.obj), type(actual))
def test_json(self):
expect = self.obj
pickle = jsonpickle.encode(self.obj)
actual = jsonpickle.decode(pickle)
self.assertEqual(actual.name, expect.name)
self.assertEqual(actual.child, expect.child)
actual = jsonpickle.decode(self.expected_json)
self.assertEqual(self.obj.name, actual.name)
self.assertEqual(type(self.obj), type(actual))
def test_unicode_dict_keys(self):
uni = unichr(0x1234)
pickle = jsonpickle.encode({uni: uni})
actual = jsonpickle.decode(pickle)
self.assertTrue(uni in actual)
self.assertEqual(actual[uni], uni)
def test_tuple_dict_keys_default(self):
"""Test that we handle dictionaries with tuples as keys."""
tuple_dict = {(1, 2): 3, (4, 5): {(7, 8): 9}}
pickle = jsonpickle.encode(tuple_dict)
expect = {'(1, 2)': 3, '(4, 5)': {'(7, 8)': 9}}
actual = jsonpickle.decode(pickle)
self.assertEqual(expect, actual)
tuple_dict = {(1, 2): [1, 2]}
pickle = jsonpickle.encode(tuple_dict)
actual = jsonpickle.decode(pickle)
self.assertEqual(actual['(1, 2)'], [1, 2])
def test_tuple_dict_keys_with_keys_enabled(self):
"""Test that we handle dictionaries with tuples as keys."""
tuple_dict = {(1, 2): 3, (4, 5): {(7, 8): 9}}
pickle = jsonpickle.encode(tuple_dict, keys=True)
expect = tuple_dict
actual = jsonpickle.decode(pickle, keys=True)
self.assertEqual(expect, actual)
tuple_dict = {(1, 2): [1, 2]}
pickle = jsonpickle.encode(tuple_dict, keys=True)
actual = jsonpickle.decode(pickle, keys=True)
self.assertEqual(actual[(1, 2)], [1, 2])
def test_None_dict_key_default(self):
# We do string coercion for non-string keys so None becomes 'None'
expect = {'null': None}
obj = {None: None}
pickle = jsonpickle.encode(obj)
actual = jsonpickle.decode(pickle)
self.assertEqual(expect, actual)
def test_None_dict_key_with_keys_enabled(self):
expect = {None: None}
obj = {None: None}
pickle = jsonpickle.encode(obj, keys=True)
actual = jsonpickle.decode(pickle, keys=True)
self.assertEqual(expect, actual)
def test_object_dict_keys(self):
"""Test that we handle random objects as keys.
"""
thing = Thing('random')
pickle = jsonpickle.encode({thing: True})
actual = jsonpickle.decode(pickle)
self.assertEqual(actual, {unicode('Thing("random")'): True})
def test_int_dict_keys_defaults(self):
int_dict = {1000: [1, 2]}
pickle = jsonpickle.encode(int_dict)
actual = jsonpickle.decode(pickle)
self.assertEqual(actual['1000'], [1, 2])
def test_int_dict_keys_with_keys_enabled(self):
int_dict = {1000: [1, 2]}
pickle = jsonpickle.encode(int_dict, keys=True)
actual = jsonpickle.decode(pickle, keys=True)
self.assertEqual(actual[1000], [1, 2])
def test_string_key_requiring_escape_dict_keys_with_keys_enabled(self):
json_key_dict = {tags.JSON_KEY + '6': [1, 2]}
pickled = jsonpickle.encode(json_key_dict, keys=True)
unpickled = jsonpickle.decode(pickled, keys=True)
self.assertEqual(unpickled[tags.JSON_KEY + '6'], [1, 2])
def test_string_key_not_requiring_escape_dict_keys_with_keys_enabled(self):
"""test that string keys that do not require escaping are not escaped"""
str_dict = {'name': [1, 2]}
pickled = jsonpickle.encode(str_dict, keys=True)
unpickled = jsonpickle.decode(pickled)
self.assertTrue('name' in unpickled)
def test_list_of_objects(self):
"""Test that objects in lists are referenced correctly"""
a = Thing('a')
b = Thing('b')
pickle = jsonpickle.encode([a, b, b])
actual = jsonpickle.decode(pickle)
self.assertEqual(actual[1], actual[2])
self.assertEqual(type(actual[0]), Thing)
self.assertEqual(actual[0].name, 'a')
self.assertEqual(actual[1].name, 'b')
self.assertEqual(actual[2].name, 'b')
def test_refs_keys_values(self):
"""Test that objects in dict keys are referenced correctly
"""
j = Thing('random')
object_dict = {j: j}
pickle = jsonpickle.encode(object_dict, keys=True)
actual = jsonpickle.decode(pickle, keys=True)
self.assertEqual(list(actual.keys()), list(actual.values()))
def test_object_keys_to_list(self):
"""Test that objects in dict values are referenced correctly
"""
j = Thing('random')
object_dict = {j: [j, j]}
pickle = jsonpickle.encode(object_dict, keys=True)
actual = jsonpickle.decode(pickle, keys=True)
obj = list(actual.keys())[0]
self.assertEqual(j.name, obj.name)
self.assertTrue(obj is actual[obj][0])
self.assertTrue(obj is actual[obj][1])
def test_refs_in_objects(self):
"""Test that objects in lists are referenced correctly"""
a = Thing('a')
b = Thing('b')
pickle = jsonpickle.encode([a, b, b])
actual = jsonpickle.decode(pickle)
self.assertNotEqual(actual[0], actual[1])
self.assertEqual(actual[1], actual[2])
self.assertTrue(actual[1] is actual[2])
def test_refs_recursive(self):
"""Test that complicated recursive refs work"""
a = Thing('a')
a.self_list = [Thing('0'), Thing('1'), Thing('2')]
a.first = a.self_list[0]
a.stuff = {a.first: a.first}
a.morestuff = {a.self_list[1]: a.stuff}
pickle = jsonpickle.encode(a, keys=True)
b = jsonpickle.decode(pickle, keys=True)
item = b.self_list[0]
self.assertEqual(b.first, item)
self.assertEqual(b.stuff[b.first], item)
self.assertEqual(b.morestuff[b.self_list[1]][b.first], item)
def test_load_backend(self):
"""Test that we can call jsonpickle.load_backend()
"""
if PY32:
return self.skip('no simplejson for python 3.2')
jsonpickle.load_backend('simplejson', 'dumps', 'loads', ValueError)
self.assertTrue(True)
def test_set_preferred_backend_allows_magic(self):
"""Tests that we can use the pluggable backends magically
"""
backend = 'os.path'
jsonpickle.load_backend(backend, 'split', 'join', AttributeError)
jsonpickle.set_preferred_backend(backend)
slash_hello, world = jsonpickle.encode('/hello/world')
jsonpickle.remove_backend(backend)
self.assertEqual(slash_hello, '/hello')
self.assertEqual(world, 'world')
def test_load_backend_submodule(self):
"""Test that we can load a submodule as a backend
"""
jsonpickle.load_backend('os.path', 'split', 'join', AttributeError)
self.assertTrue('os.path' in jsonpickle.json._backend_names and
'os.path' in jsonpickle.json._encoders and
'os.path' in jsonpickle.json._decoders and
'os.path' in jsonpickle.json._encoder_options and
'os.path' in jsonpickle.json._decoder_exceptions)
def _backend_is_partially_loaded(self, backend):
"""Return True if the specified backend is incomplete"""
return (backend in jsonpickle.json._backend_names or
backend in jsonpickle.json._encoders or
backend in jsonpickle.json._decoders or
backend in jsonpickle.json._encoder_options or
backend in jsonpickle.json._decoder_exceptions)
def test_load_backend_handles_bad_encode(self):
"""Test that we ignore bad encoders"""
load_backend = jsonpickle.load_backend
self.assertFalse(load_backend('os.path', 'bad!', 'split',
AttributeError))
self.failIf(self._backend_is_partially_loaded('os.path'))
def test_load_backend_raises_on_bad_decode(self):
"""Test that we ignore bad decoders"""
load_backend = jsonpickle.load_backend
self.assertFalse(load_backend('os.path', 'join', 'bad!',
AttributeError))
self.failIf(self._backend_is_partially_loaded('os.path'))
def test_load_backend_handles_bad_loads_exc(self):
"""Test that we ignore bad decoder exceptions"""
load_backend = jsonpickle.load_backend
self.assertFalse(load_backend('os.path', 'join', 'split', 'bad!'))
self.failIf(self._backend_is_partially_loaded('os.path'))
def test_list_item_reference(self):
thing = Thing('parent')
thing.child = Thing('child')
thing.child.refs = [thing]
encoded = jsonpickle.encode(thing)
decoded = jsonpickle.decode(encoded)
self.assertEqual(id(decoded.child.refs[0]), id(decoded))
def test_reference_to_list(self):
thing = Thing('parent')
thing.a = [1]
thing.b = thing.a
thing.b.append(thing.a)
thing.b.append([thing.a])
encoded = jsonpickle.encode(thing)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.a[0], 1)
self.assertEqual(decoded.b[0], 1)
self.assertEqual(id(decoded.a), id(decoded.b))
self.assertEqual(id(decoded.a), id(decoded.a[1]))
self.assertEqual(id(decoded.a), id(decoded.a[2][0]))
def test_make_refs_disabled_list(self):
obj_a = Thing('foo')
obj_b = Thing('bar')
coll = [obj_a, obj_b, obj_b]
encoded = jsonpickle.encode(coll, make_refs=False)
decoded = jsonpickle.decode(encoded)
self.assertEqual(len(decoded), 3)
self.assertTrue(decoded[0] is not decoded[1])
self.assertTrue(decoded[1] is not decoded[2])
def test_make_refs_disabled_reference_to_list(self):
thing = Thing('parent')
thing.a = [1]
thing.b = thing.a
thing.b.append(thing.a)
thing.b.append([thing.a])
encoded = jsonpickle.encode(thing, make_refs=False)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.a[0], 1)
self.assertEqual(decoded.b[0:3], '[1,')
self.assertEqual(decoded.a[1][0:3], '[1,')
self.assertEqual(decoded.a[2][0][0:3], '[1,')
class PicklableNamedTuple(object):
"""
A picklable namedtuple wrapper, to demonstrate the need
for protocol 2 compatibility. Yes, this is contrived in
its use of new, but it demonstrates the issue.
"""
def __new__(cls, propnames, vals):
# it's necessary to use the correct class name for class resolution
# classes that fake their own names may never be unpicklable
ntuple = collections.namedtuple(cls.__name__, propnames)
ntuple.__getnewargs__ = (lambda self: (propnames, vals))
instance = ntuple.__new__(ntuple, *vals)
return instance
class PicklableNamedTupleEx(object):
"""
A picklable namedtuple wrapper, to demonstrate the need
for protocol 4 compatibility. Yes, this is contrived in
its use of new, but it demonstrates the issue.
"""
def __getnewargs__(self):
raise NotImplementedError("This class needs __getnewargs_ex__")
def __new__(cls, newargs=__getnewargs__, **kwargs):
# it's necessary to use the correct class name for class resolution
# classes that fake their own names may never be unpicklable
ntuple = collections.namedtuple(cls.__name__, sorted(kwargs.keys()))
ntuple.__getnewargs_ex__ = (lambda self: ((), kwargs))
ntuple.__getnewargs__ = newargs
instance = ntuple.__new__(ntuple,
*[b for a, b in sorted(kwargs.items())])
return instance
class PickleProtocol2Thing(object):
def __init__(self, *args):
self.args = args
def __getnewargs__(self):
return self.args
def __eq__(self, other):
"""
Make PickleProtocol2Thing('slotmagic') ==
PickleProtocol2Thing('slotmagic')
"""
if self.__dict__ == other.__dict__ and dir(self) == dir(other):
for prop in dir(self):
selfprop = getattr(self, prop)
if not callable(selfprop) and prop[0] != '_':
if selfprop != getattr(other, prop):
return False
return True
else:
return False
# these two instances are used below and in tests
slotmagic = PickleProtocol2Thing('slotmagic')
dictmagic = PickleProtocol2Thing('dictmagic')
class PickleProtocol2GetState(PickleProtocol2Thing):
def __new__(cls, *args):
instance = super(PickleProtocol2GetState, cls).__new__(cls)
instance.newargs = args
return instance
def __getstate__(self):
return 'I am magic'
class PickleProtocol2GetStateDict(PickleProtocol2Thing):
def __getstate__(self):
return {'magic': True}
class PickleProtocol2GetStateSlots(PickleProtocol2Thing):
def __getstate__(self):
return (None, {'slotmagic': slotmagic})
class PickleProtocol2GetStateSlotsDict(PickleProtocol2Thing):
def __getstate__(self):
return ({'dictmagic': dictmagic}, {'slotmagic': slotmagic})
class PickleProtocol2GetSetState(PickleProtocol2GetState):
def __setstate__(self, state):
"""
Contrived example, easy to test
"""
if state == "I am magic":
self.magic = True
else:
self.magic = False
class PickleProtocol2ChildThing(object):
def __init__(self, child):
self.child = child
def __getnewargs__(self):
return ([self.child],)
class PickleProtocol2ReduceString(object):
def __reduce__(self):
return __name__+'.slotmagic'
class PickleProtocol2ReduceExString(object):
def __reduce__(self):
assert False, "Should not be here"
def __reduce_ex__(self, n):
return __name__+'.slotmagic'
class PickleProtocol2ReduceTuple(object):
def __init__(self, argval, optional=None):
self.argval = argval
self.optional = optional
def __reduce__(self):
return (PickleProtocol2ReduceTuple, # callable
('yam', 1), # args
None, # state
iter([]), # listitems
iter([]), # dictitems
)
def protocol_2_reduce_tuple_func(*args):
return PickleProtocol2ReduceTupleFunc(*args)
class PickleProtocol2ReduceTupleFunc(object):
def __init__(self, argval, optional=None):
self.argval = argval
self.optional = optional
def __reduce__(self):
return (protocol_2_reduce_tuple_func, # callable
('yam', 1), # args
None, # state
iter([]), # listitems
iter([]), # dictitems
)
def __newobj__(lol, fail):
"""
newobj is special-cased, such that it is not actually called
"""
class PickleProtocol2ReduceNewobj(PickleProtocol2ReduceTupleFunc):
def __new__(cls, *args):
inst = super(cls, cls).__new__(cls)
inst.newargs = args
return inst
def __reduce__(self):
return (__newobj__, # callable
(PickleProtocol2ReduceNewobj, 'yam', 1), # args
None, # state
iter([]), # listitems
iter([]), # dictitems
)
class PickleProtocol2ReduceTupleState(PickleProtocol2ReduceTuple):
def __reduce__(self):
return (PickleProtocol2ReduceTuple, # callable
('yam', 1), # args
{'foo': 1}, # state
iter([]), # listitems
iter([]), # dictitems
)
class PickleProtocol2ReduceTupleSetState(PickleProtocol2ReduceTuple):
def __setstate__(self, state):
self.bar = state['foo']
def __reduce__(self):
return (type(self), # callable
('yam', 1), # args
{'foo': 1}, # state
iter([]), # listitems
iter([]), # dictitems
)
class PickleProtocol2ReduceTupleStateSlots(object):
__slots__ = ('argval', 'optional', 'foo')
def __init__(self, argval, optional=None):
self.argval = argval
self.optional = optional
def __reduce__(self):
return (PickleProtocol2ReduceTuple, # callable
('yam', 1), # args
{'foo': 1}, # state
iter([]), # listitems
iter([]), # dictitems
)
class PickleProtocol2ReduceListitemsAppend(object):
def __init__(self):
self.inner = []
def __reduce__(self):
return (PickleProtocol2ReduceListitemsAppend, # callable
(), # args
{}, # state
iter(['foo', 'bar']), # listitems
iter([]), # dictitems
)
def append(self, item):
self.inner.append(item)
class PickleProtocol2ReduceListitemsExtend(object):
def __init__(self):
self.inner = []
def __reduce__(self):
        return (PickleProtocol2ReduceListitemsExtend,  # callable
(), # args
{}, # state
iter(['foo', 'bar']), # listitems
iter([]), # dictitems
)
def extend(self, items):
        self.inner.extend(items)
class PickleProtocol2ReduceDictitems(object):
def __init__(self):
self.inner = {}
def __reduce__(self):
return (PickleProtocol2ReduceDictitems, # callable
(), # args
{}, # state
[], # listitems
iter(zip(['foo', 'bar'], ['foo', 'bar'])), # dictitems
)
def __setitem__(self, k, v):
return self.inner.__setitem__(k, v)
class PickleProtocol2Classic:
def __init__(self, foo):
self.foo = foo
class PickleProtocol2ClassicInitargs:
def __init__(self, foo, bar=None):
self.foo = foo
if bar:
self.bar = bar
def __getinitargs__(self):
return ('choo', 'choo')
class PicklingProtocol4TestCase(unittest.TestCase):
def test_pickle_newargs_ex(self):
"""
Ensure we can pickle and unpickle an object whose class needs arguments
        to __new__ and get back the same tuple
"""
instance = PicklableNamedTupleEx(**{'a': 'b', 'n': 2})
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(instance, decoded)
def test_validate_reconstruct_by_newargs_ex(self):
"""
Ensure that the exemplar tuple's __getnewargs_ex__ works
This is necessary to know whether the breakage exists
in jsonpickle or not
"""
instance = PicklableNamedTupleEx(**{'a': 'b', 'n': 2})
args, kwargs = instance.__getnewargs_ex__()
newinstance = PicklableNamedTupleEx.__new__(PicklableNamedTupleEx,
*args, **kwargs)
self.assertEqual(instance, newinstance)
def test_references(self):
shared = Thing('shared')
instance = PicklableNamedTupleEx(**{'a': shared, 'n': shared})
child = Thing('child')
shared.child = child
child.child = instance
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded[0], decoded[1])
self.assertTrue(decoded[0] is decoded[1])
self.assertTrue(decoded.a is decoded.n)
self.assertEqual(decoded.a.name, 'shared')
self.assertEqual(decoded.a.child.name, 'child')
self.assertTrue(decoded.a.child.child is decoded)
self.assertTrue(decoded.n.child.child is decoded)
self.assertTrue(decoded.a.child is decoded.n.child)
self.assertEqual(decoded.__class__.__name__,
PicklableNamedTupleEx.__name__)
# TODO the class itself looks just like the real class, but it's
# actually a reconstruction; PicklableNamedTupleEx is not type(decoded).
self.assertFalse(decoded.__class__ is PicklableNamedTupleEx)
class PicklingProtocol2TestCase(SkippableTest):
def test_classic_init_has_args(self):
"""
Test unpickling a classic instance whose init takes args,
has no __getinitargs__
Because classic only exists under 2, skipped if PY3
"""
if PY3:
return self.skip('No classic classes in PY3')
instance = PickleProtocol2Classic(3)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.foo, 3)
def test_getinitargs(self):
"""
Test __getinitargs__ with classic instance
Because classic only exists under 2, skipped if PY3
"""
if PY3:
return self.skip('No classic classes in PY3')
instance = PickleProtocol2ClassicInitargs(3)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.bar, 'choo')
def test_reduce_complex_num(self):
instance = 5j
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded, instance)
def test_reduce_complex_zero(self):
instance = 0j
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded, instance)
def test_reduce_dictitems(self):
'Test reduce with dictitems set (as a generator)'
instance = PickleProtocol2ReduceDictitems()
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.inner, {'foo': 'foo', 'bar': 'bar'})
def test_reduce_listitems_extend(self):
'Test reduce with listitems set (as a generator), yielding single items'
instance = PickleProtocol2ReduceListitemsExtend()
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.inner, ['foo', 'bar'])
def test_reduce_listitems_append(self):
'Test reduce with listitems set (as a generator), yielding single items'
instance = PickleProtocol2ReduceListitemsAppend()
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.inner, ['foo', 'bar'])
def test_reduce_state_setstate(self):
'Test reduce with the optional state argument set, on an object with '\
'a __setstate__'
instance = PickleProtocol2ReduceTupleSetState(5)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.argval, 'yam')
self.assertEqual(decoded.optional, 1)
self.assertEqual(decoded.bar, 1)
self.assertFalse(hasattr(decoded, 'foo'))
def test_reduce_state_no_dict(self):
'Test reduce with the optional state argument set, on an object with '\
'no __dict__, and no __setstate__'
instance = PickleProtocol2ReduceTupleStateSlots(5)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.argval, 'yam')
self.assertEqual(decoded.optional, 1)
self.assertEqual(decoded.foo, 1)
def test_reduce_state_dict(self):
'Test reduce with the optional state argument set, on an object with '\
'a __dict__, and no __setstate__'
instance = PickleProtocol2ReduceTupleState(5)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.argval, 'yam')
self.assertEqual(decoded.optional, 1)
self.assertEqual(decoded.foo, 1)
def test_reduce_basic(self):
"""Test reduce with only callable and args"""
instance = PickleProtocol2ReduceTuple(5)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.argval, 'yam')
self.assertEqual(decoded.optional, 1)
def test_reduce_basic_func(self):
"""Test reduce with only callable and args
callable is a module-level function
"""
instance = PickleProtocol2ReduceTupleFunc(5)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.argval, 'yam')
self.assertEqual(decoded.optional, 1)
def test_reduce_newobj(self):
"""Test reduce with callable called __newobj__
ensures special-case behaviour
"""
instance = PickleProtocol2ReduceNewobj(5)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.newargs, ('yam', 1))
def test_reduce_iter(self):
instance = iter('123')
self.assertTrue(util.is_iterator(instance))
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(next(decoded), '1')
self.assertEqual(next(decoded), '2')
self.assertEqual(next(decoded), '3')
def test_reduce_string(self):
"""
Ensure json pickle will accept the redirection to another object when
__reduce__ returns a string
"""
instance = PickleProtocol2ReduceString()
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded, slotmagic)
def test_reduce_ex_string(self):
"""
Ensure json pickle will accept the redirection to another object when
__reduce_ex__ returns a string
ALSO tests that __reduce_ex__ is called in preference to __reduce__
"""
instance = PickleProtocol2ReduceExString()
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded, slotmagic)
def test_pickle_newargs(self):
"""
Ensure we can pickle and unpickle an object whose class needs arguments
        to __new__ and get back the same tuple
"""
instance = PicklableNamedTuple(('a', 'b'), (1, 2))
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(instance, decoded)
def test_validate_reconstruct_by_newargs(self):
"""
Ensure that the exemplar tuple's __getnewargs__ works
This is necessary to know whether the breakage exists
in jsonpickle or not
"""
instance = PicklableNamedTuple(('a', 'b'), (1, 2))
newinstance = PicklableNamedTuple.__new__(PicklableNamedTuple,
*(instance.__getnewargs__()))
self.assertEqual(instance, newinstance)
def test_getnewargs_priority(self):
"""
Ensure newargs are used before py/state when decoding
(As per PEP 307, classes are not supposed to implement
all three magic methods)
"""
instance = PickleProtocol2GetState('whatevs')
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.newargs, ('whatevs',))
def test_restore_dict_state(self):
"""
Ensure that if getstate returns a dict, and there is no custom
__setstate__, the dict is used as a source of variables to restore
"""
instance = PickleProtocol2GetStateDict('whatevs')
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertTrue(decoded.magic)
def test_restore_slots_state(self):
"""
Ensure that if getstate returns a 2-tuple with a dict in the second
position, and there is no custom __setstate__, the dict is used as a
source of variables to restore
"""
instance = PickleProtocol2GetStateSlots('whatevs')
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(decoded.slotmagic.__dict__, slotmagic.__dict__)
self.assertEqual(decoded.slotmagic, slotmagic)
def test_restore_slots_dict_state(self):
"""
Ensure that if getstate returns a 2-tuple with a dict in both positions,
and there is no custom __setstate__, the dicts are used as a source of
variables to restore
"""
instance = PickleProtocol2GetStateSlotsDict('whatevs')
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(PickleProtocol2Thing('slotmagic'),
PickleProtocol2Thing('slotmagic'))
self.assertEqual(decoded.slotmagic.__dict__, slotmagic.__dict__)
self.assertEqual(decoded.slotmagic, slotmagic)
self.assertEqual(decoded.dictmagic, dictmagic)
def test_setstate(self):
"""
Ensure output of getstate is passed to setstate
"""
instance = PickleProtocol2GetSetState('whatevs')
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertTrue(decoded.magic)
def test_handles_nested_objects(self):
child = PickleProtocol2Thing(None)
instance = PickleProtocol2Thing(child, child)
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertEqual(PickleProtocol2Thing, decoded.__class__)
self.assertEqual(PickleProtocol2Thing, decoded.args[0].__class__)
self.assertEqual(PickleProtocol2Thing, decoded.args[1].__class__)
self.assertTrue(decoded.args[0] is decoded.args[1])
def test_handles_cyclical_objects(self):
child = Capture(None)
instance = Capture(child, child)
child.args = (instance,) # create a cycle
# TODO we do not properly restore references inside of lists.
# Change the above tuple into a list to show the breakage.
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
# Ensure the right objects were constructed
self.assertEqual(Capture, decoded.__class__)
self.assertEqual(Capture, decoded.args[0].__class__)
self.assertEqual(Capture, decoded.args[1].__class__)
self.assertEqual(Capture, decoded.args[0].args[0].__class__)
self.assertEqual(Capture, decoded.args[1].args[0].__class__)
# It's turtles all the way down
self.assertEqual(Capture, decoded.args[0].args[0]
.args[0].args[0]
.args[0].args[0]
.args[0].args[0]
.args[0].args[0]
.args[0].args[0]
.args[0].args[0]
.args[0].__class__)
# Ensure that references are properly constructed
self.assertTrue(decoded.args[0] is decoded.args[1])
self.assertTrue(decoded is decoded.args[0].args[0])
self.assertTrue(decoded is decoded.args[1].args[0])
self.assertTrue(decoded.args[0] is decoded.args[0].args[0].args[0])
self.assertTrue(decoded.args[0] is decoded.args[1].args[0].args[0])
def test_handles_cyclical_objects_in_lists(self):
child = PickleProtocol2ChildThing(None)
instance = PickleProtocol2ChildThing([child, child])
child.child = instance # create a cycle
encoded = jsonpickle.encode(instance)
decoded = jsonpickle.decode(encoded)
self.assertTrue(decoded is decoded.child[0].child)
self.assertTrue(decoded is decoded.child[1].child)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(JSONPickleTestCase))
suite.addTest(unittest.makeSuite(PicklingTestCase))
suite.addTest(unittest.makeSuite(PicklingProtocol2TestCase))
suite.addTest(unittest.makeSuite(PicklingProtocol4TestCase))
suite.addTest(doctest.DocTestSuite(jsonpickle))
suite.addTest(doctest.DocTestSuite(jsonpickle.pickler))
suite.addTest(doctest.DocTestSuite(jsonpickle.unpickler))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
stats: avg_line_length 35.352763 | max_line_length 80 | alphanum_fraction 0.625276

hexsha: b79abc445668b5f396bb8d5d57c397ad8da18d51 | size: 961 | ext: py | lang: Python
stars:  webcam/widgets.py | beda-software/django-webcam | c997f38d426ff439f37c32a9d32d939fc8d281a3 | ["BSD-1-Clause"] | 1 | 2019-10-10T16:24:01.000Z | 2019-10-10T16:24:01.000Z
issues: webcam/widgets.py | beda-software/django-webcam | c997f38d426ff439f37c32a9d32d939fc8d281a3 | ["BSD-1-Clause"] | null | null | null
forks:  webcam/widgets.py | beda-software/django-webcam | c997f38d426ff439f37c32a9d32d939fc8d281a3 | ["BSD-1-Clause"] | 2 | 2018-02-04T13:01:04.000Z | 2020-03-01T20:10:39.000Z
from django.forms.widgets import Widget
from django.template.loader import render_to_string
class CameraWidget(Widget):
template = 'webcam/webcam.html'
def render(self, name, value, attrs=None):
defaults = {'name': name,
'format': self.attrs['format'],
'width': self.attrs['width'],
'height': self.attrs['height'],
'camera_width': self.attrs['camera_width'],
'camera_height': self.attrs['camera_height'],
'picture': value,
'attrs': attrs}
defaults.update(attrs)
return render_to_string(self.template, defaults)
def value_from_datadict(self, data, files, name):
raw_val = data.get("data_%s" % name, None)
filename = data.get("%s" % name, None)
if raw_val:
raw_val = raw_val.replace('data:image/jpeg;base64,', '')
return filename, raw_val
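
# --- Illustrative sketch, not part of the original webcam/widgets.py -----------------
# value_from_datadict() above returns a (filename, base64_payload) pair with the
# 'data:image/jpeg;base64,' prefix already stripped. A consuming form field could turn
# that pair back into a Django file object roughly as below; the function name and the
# 'capture.jpg' fallback are made-up names used only for illustration.
import base64

from django.core.files.base import ContentFile


def camera_value_to_file(value):
    """Decode the (filename, base64_payload) pair produced by CameraWidget."""
    filename, raw_val = value
    if not raw_val:
        return None
    return ContentFile(base64.b64decode(raw_val), name=filename or 'capture.jpg')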
stats: avg_line_length 36.961538 | max_line_length 68 | alphanum_fraction 0.569199

hexsha: ae6659cc34637189e2558693d65e43ed814a6f3e | size: 9,612 | ext: py | lang: Python
stars:  app/scheme/utils.py | egormm/geo-optic-net-monitoring | 9fab8595f6c51fd9f4f9f7e6ed29736d5f3ee985 | ["MIT"] | null | null | null
issues: app/scheme/utils.py | egormm/geo-optic-net-monitoring | 9fab8595f6c51fd9f4f9f7e6ed29736d5f3ee985 | ["MIT"] | null | null | null
forks:  app/scheme/utils.py | egormm/geo-optic-net-monitoring | 9fab8595f6c51fd9f4f9f7e6ed29736d5f3ee985 | ["MIT"] | null | null | null
import base64
from collections import OrderedDict
from typing import List
import graphviz
def safe_init_nodes(source: List[str], graph: graphviz.Digraph, name: str) -> List[str]:
for node in source:
graph.node(f"{name}-{node}", label=node)
if not len(source):
graph.node(f"{name}-fake", style='invis')
source.append("fake")
return source
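
# Illustrative usage sketch (not part of the original module): safe_init_nodes() keeps a
# cluster from being empty by adding an invisible "fake" node, so edges drawn later with
# ltail/lhead always have an anchor node to attach to. Roughly:
#
#     sub = graphviz.Digraph(name="cluster_inputs", graph_attr={"style": "invis"})
#     names = safe_init_nodes([], sub, "inputs")   # names == ["fake"], invisible node added
#     g = graphviz.Digraph()
#     g.subgraph(sub)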
class Node:
def __init__(self, n_type, n_id, n_term=0, **kwargs):
self.label = f"{n_type}-{n_id}"
self.n_term = n_term
if 'color' in kwargs:
self.color = kwargs['color']
else:
self.color = "grey70"
class FakeNode(Node):
def __init__(self, n_type):
super().__init__(n_type, "fake")
class Cluster(graphviz.Digraph):
def __init__(self, cluster_type: str,
field_mapping: dict,
serialized_data: List[dict],
*args, **kwargs
):
self.cluster_type = cluster_type
self.name = f"cluster_{cluster_type}"
self.nodes: List[Node] = []
self.safe_init_nodes(field_mapping, serialized_data)
super().__init__(*args, **kwargs)
def safe_init_nodes(self, field_mapping: dict, serialized_data: List[dict]):
for element in serialized_data:
self.nodes.append(Node(self.cluster_type,
element[field_mapping['id']],
element.get(field_mapping['n_term'], 0)
))
if len(self.nodes) == 0:
self.nodes.append(FakeNode(self.cluster_type))
class Graph(graphviz.Digraph):
def __init__(self, serialized_data, *args, **kwargs):
self.inputs = Cluster("inputs",
{"id": "node_id",
"n_term": "n_terminals"},
serialized_data['inputs'], )
def get_img(data, debug=False):
inp_to_split = False
split_to_out = False
inputs = [str(inp['node_id']) for inp in data['inputs']]
splitters = [splt['node_id'] for splt in data['splitters']]
outputs = [str(out['node_id']) for out in data['outputs']]
fibers = []
for fiber in data['fibers']:
if fiber['from_node'] != {} and fiber['to_node'] != {}:
if len({fiber['from_node']['node_type'], fiber['to_node']['node_type']}.symmetric_difference(
{"inputs", "splitters"})) == 0:
inp_to_split = True
if len({fiber['from_node']['node_type'], fiber['to_node']['node_type']}.symmetric_difference(
{"splitters", "outputs"})) == 0:
split_to_out = True
fibers.append((f"{fiber['from_node']['node_type']}-{fiber['from_node']['node_id']}",
f"{fiber['to_node']['node_type']}-{fiber['to_node']['node_id']}",
fiber['color'], str(fiber['id'])))
elif fiber['from_node'] != {}:
fibers.append((f"{fiber['from_node']['node_type']}-{fiber['from_node']['node_id']}",
"",
fiber['color'], str(fiber['id'])))
elif fiber['to_node'] != {}:
fibers.append(("",
f"{fiber['to_node']['node_type']}-{fiber['to_node']['node_id']}",
fiber['color'], str(fiber['id'])))
g = graphviz.Digraph("mufta",
graph_attr={"rankdir": "LR",
"compound": "true",
"bgcolor": "transparent"
},
format="png", )
input_graph = graphviz.Digraph(name="cluster_inputs",
graph_attr={"rank": "same",
"style": "invis"
},
node_attr={"shape": "rarrow"})
inputs = safe_init_nodes(inputs, input_graph, "inputs")
g.subgraph(input_graph)
splitter_graph = graphviz.Digraph(name="cluster_splitters",
graph_attr={"rank": "same", "label": "Splitters"},
node_attr={"shape": "square"})
splitters = safe_init_nodes(splitters, splitter_graph, "splitters")
g.subgraph(splitter_graph)
output_graph = graphviz.Digraph(name="cluster_outputs",
graph_attr={
"rank": "same",
"style": "invis"
},
node_attr={"shape": "rarrow"})
outputs = safe_init_nodes(outputs, output_graph, "outputs")
g.subgraph(output_graph)
# #--------------
#
# g.edge("inp1", "splitter1", color="invis", ltail="cluster_inputs", lhead="cluster_splitters")
# g.edge("inp3", "mid2", color="invis")
# g.edge("inp1", "mid1", color="blue:green")
# g.edge("inp1", "mid2", color="purple:red")
# g.edge("inp2", "mid2", color="blue")
# g.edge("mid2", "mid3", color="orange")
# g.edge("splitter1", "out1", color="invis", ltail="cluster_splitters", lhead='cluster_outs')
# g.edge("inp1", "mid1")
if not inp_to_split:
g.edge(f"inputs-{inputs[len(inputs) // 2]}", f"splitters-{splitters[len(splitters) // 2]}",
color="invis",
ltail="cluster_inputs",
lhead="cluster_splitters")
if not split_to_out:
g.edge(f"splitters-{splitters[len(splitters) // 2]}", f"outputs-{outputs[len(outputs) // 2]}",
color="invis",
ltail="cluster_splitters",
lhead="cluster_outputs")
for a, b, c, d in fibers:
if not a:
if "inputs" in b:
g.edge(f"splitters-{splitters[len(splitters) // 2]}", b, color=c, ltail="cluster_splitters", label=d)
elif "splitters" in b:
g.edge(f"inputs-{inputs[len(inputs) // 2]}", b, color=c, ltail="cluster_inputs", label=d)
else:
g.edge(f"splitters-{splitters[len(splitters) // 2]}", b, color=c, ltail="cluster_splitters", label=d)
elif not b:
if "inputs" in a:
g.edge(a, f"splitters-{splitters[len(splitters) // 2]}", color=c, lhead="cluster_splitters", label=d)
elif "splitters" in a:
g.edge(a, f"outputs-{outputs[len(outputs) // 2]}", color=c, lhead="cluster_outputs", label=d)
else:
g.edge(a, f"splitters-{splitters[len(splitters) // 2]}", color=c, lhead="cluster_splitters", label=d)
else:
# if "inputs" in a and "outputs" in b:
# g.edge(a, b, color=c, label=d, constraint='false')
# else:
g.edge(a, b, color=c, label=d)
if debug:
g.view()
return base64.b64encode(g.pipe()).decode()
if __name__ == '__main__':
print(get_img({'fibers': [{'box': 15,
'color': 'red',
'end_content_type': 11,
'end_object_id': 2,
'from_node': OrderedDict([('node_id', '8'),
('node_type', 'inputs')]),
'id': 8,
'start_content_type': 12,
'start_object_id': 20,
'to_node': OrderedDict([('node_id', '9'),
('node_type', 'outputs')])},
{'box': 15,
'color': 'red',
'end_content_type': 11,
'end_object_id': 2,
'from_node': OrderedDict([('node_id', '8'),
('node_type', 'inputs')]),
'id': 9,
'start_content_type': 12,
'start_object_id': 20,
'to_node': OrderedDict([('node_id', '9'),
('node_type', 'outputs')])},
{'box': 15,
'color': 'red',
'end_content_type': 11,
'end_object_id': 2,
'from_node': OrderedDict([('node_id', '8'),
('node_type', 'inputs')]),
'id': 10,
'start_content_type': 12,
'start_object_id': 20,
'to_node': OrderedDict([('node_id', '9'),
('node_type', 'outputs')])}],
'inputs': [{'box': 15, 'id': 20, 'input': 8, 'n_terminals': 8, 'node_id': '8'},
{'box': 15,
'id': 22,
'input': 9,
'n_terminals': 8,
'node_id': '9'}],
'outputs': [{'box': 15,
'id': 2,
'n_terminals': 8,
'node_id': '9',
'output': 9}],
'splitters': []}, debug=True))
stats: avg_line_length 43.493213 | max_line_length 117 | alphanum_fraction 0.439971

hexsha: 9d1c5c037eb71c3ede23aabe8562e20e0e21beb7 | size: 282 | ext: py | lang: Python
stars:  ultimatethumb/__init__.py | stephrdev/django-ultimatethumb | bb14a8d82041453f34439b82cb054a8de7e7bc8b | ["BSD-3-Clause"] | 4 | 2015-03-23T15:58:13.000Z | 2020-04-10T00:02:32.000Z
issues: ultimatethumb/__init__.py | moccu/django-ultimatethumb | bb14a8d82041453f34439b82cb054a8de7e7bc8b | ["BSD-3-Clause"] | 23 | 2015-05-12T10:35:01.000Z | 2021-09-08T00:09:01.000Z
forks:  ultimatethumb/__init__.py | stephrdev/django-ultimatethumb | bb14a8d82041453f34439b82cb054a8de7e7bc8b | ["BSD-3-Clause"] | 4 | 2015-06-30T09:06:00.000Z | 2021-09-28T13:52:53.000Z
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
# This is required for Python versions < 3.8
import importlib_metadata
try:
__version__ = importlib_metadata.version('django-ultimatethumb')
except Exception:
__version__ = 'HEAD'
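
# Illustrative note (not part of the original file): with the importlib_metadata fallback
# above, the version can be read the same way on Python >= 3.8 and on older interpreters
# that have the importlib_metadata backport installed, e.g.:
#
#     import ultimatethumb
#     print(ultimatethumb.__version__)   # a dist version, or 'HEAD' when not installed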
stats: avg_line_length 25.636364 | max_line_length 68 | alphanum_fraction 0.765957

hexsha: a3676c816b770e7ea776c627fb4567a1ca29dd40 | size: 2,520 | ext: py | lang: Python
stars:  third_party/buildbot_8_4p1/buildbot/steps/package/rpm/rpmspec.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | ["BSD-3-Clause"] | null | null | null
issues: third_party/buildbot_8_4p1/buildbot/steps/package/rpm/rpmspec.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | ["BSD-3-Clause"] | null | null | null
forks:  third_party/buildbot_8_4p1/buildbot/steps/package/rpm/rpmspec.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | ["BSD-3-Clause"] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright Dan Radez <dradez+buildbot@redhat.com>
# Portions Copyright Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
"""
library to populate parameters from an rpmspec file into a memory structure
"""
import re
from buildbot.steps.shell import ShellCommand
class RpmSpec(ShellCommand):
"""
read parameters out of an rpm spec file
"""
#initialize spec info vars and get them from the spec file
n_regex = re.compile('^Name:[ ]*([^\s]*)')
v_regex = re.compile('^Version:[ ]*([0-9\.]*)')
def __init__(self, specfile=None, **kwargs):
"""
Creates the RpmSpec object.
@type specfile: str
@param specfile: the name of the specfile to get the package
name and version from
@type kwargs: dict
@param kwargs: All further keyword arguments.
"""
self.specfile = specfile
self._pkg_name = None
self._pkg_version = None
self._loaded = False
def load(self):
"""
call this function after the file exists to populate properties
"""
# If we are given a string, open it up else assume it's something we
# can call read on.
if isinstance(self.specfile, str):
f = open(self.specfile, 'r')
else:
f = self.specfile
for line in f:
if self.v_regex.match(line):
self._pkg_version = self.v_regex.match(line).group(1)
if self.n_regex.match(line):
self._pkg_name = self.n_regex.match(line).group(1)
f.close()
self._loaded = True
# Read-only properties
loaded = property(lambda self: self._loaded)
pkg_name = property(lambda self: self._pkg_name)
pkg_version = property(lambda self: self._pkg_version)
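
# Illustrative usage sketch (not part of the original Buildbot module): RpmSpec is meant
# to be pointed at a spec file, load()ed once the file exists, and then queried through
# its read-only properties. The spec path below is a made-up example.
#
#     spec = RpmSpec(specfile='package.spec')
#     spec.load()
#     if spec.loaded:
#         print(spec.pkg_name, spec.pkg_version)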
stats: avg_line_length 35 | max_line_length 79 | alphanum_fraction 0.661508

hexsha: 11ad2d82868cdc71d25a93b3c19893f3c7f0579a | size: 30,317 | ext: py | lang: Python
stars:  sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_virtual_hubs_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z
issues: sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_virtual_hubs_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z
forks:  sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_virtual_hubs_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubsOperations(object):
"""VirtualHubsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
"""Retrieves the details of a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHub, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.VirtualHub
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.VirtualHub"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'VirtualHub')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.VirtualHub"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHub"]
"""Creates a VirtualHub resource if it doesn't exist else updates the existing VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to create or update VirtualHub.
:type virtual_hub_parameters: ~azure.mgmt.network.v2018_04_01.models.VirtualHub
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_04_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHub"]
"""Updates VirtualHub tags.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to update VirtualHub tags.
:type virtual_hub_parameters: ~azure.mgmt.network.v2018_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_04_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubsResult"]
"""Lists all the VirtualHubs in a resource group.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_04_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubsResult"]
"""Lists all the VirtualHubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_04_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualHubs'} # type: ignore
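
# Illustrative usage sketch (not part of the generated SDK file): these operations are
# reached through a service client rather than by instantiating VirtualHubsOperations
# directly. The client construction and all parameter values below are assumptions made
# only for illustration.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.virtual_hubs.begin_create_or_update(
#         "my-rg", "my-hub", {"location": "westus2"})
#     hub = poller.result()   # LROPoller blocks until the long-running operation finishes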
stats: avg_line_length 48.741158 | max_line_length 191 | alphanum_fraction 0.660059

hexsha: 8e8b5e9af516e8c041ffcc94a346ec36027da104 | size: 181 | ext: py | lang: Python
stars:  Mundo 1/ex027.py | othiagomanhaes/Python | 8cfe6d50e31f6c9ff886e3961051cc4cbfb8569e | ["MIT"] | null | null | null
issues: Mundo 1/ex027.py | othiagomanhaes/Python | 8cfe6d50e31f6c9ff886e3961051cc4cbfb8569e | ["MIT"] | null | null | null
forks:  Mundo 1/ex027.py | othiagomanhaes/Python | 8cfe6d50e31f6c9ff886e3961051cc4cbfb8569e | ["MIT"] | null | null | null
# ex027: read a full name and print the first and last names (user-facing strings are in Portuguese).
nome = str(input('Digite o teu nome completo: ')).strip()  # 'Enter your full name: '
name = nome.split()
print('O primeiro nome é: {}'.format(name[0]))  # 'The first name is: ...'
print('O último nome é: {}'.format(name[len(name)-1]))  # 'The last name is: ...'
stats: avg_line_length 30.166667 | max_line_length 57 | alphanum_fraction 0.646409

hexsha: dd84ffa55f84a34945ec9433580a6d212ff3b9b2 | size: 1,424 | ext: py | lang: Python
stars:  src/django_secret_sharing/migrations/0001_add_secret_model.py | vicktornl/django-secret-sharing | 984dafb7769e11b810387c972a5eb99d5ff100ce | ["MIT"] | 3 | 2022-02-24T15:44:56.000Z | 2022-03-01T13:17:33.000Z
issues: src/django_secret_sharing/migrations/0001_add_secret_model.py | vicktornl/django-secret-sharing | 984dafb7769e11b810387c972a5eb99d5ff100ce | ["MIT"] | 4 | 2022-02-24T15:47:09.000Z | 2022-03-01T12:09:58.000Z
forks:  src/django_secret_sharing/migrations/0001_add_secret_model.py | vicktornl/django-secret-sharing | 984dafb7769e11b810387c972a5eb99d5ff100ce | ["MIT"] | null | null | null
# Generated by Django 3.0.12 on 2021-03-15 09:49
import uuid
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Secret",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("value", models.BinaryField(blank=True, null=True)),
("erased", models.BooleanField(default=False)),
(
"created_at",
models.DateTimeField(
auto_now_add=True,
default=django.utils.timezone.now,
verbose_name="created at",
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="modified at"),
),
("erased_at", models.DateTimeField(blank=True, null=True)),
],
options={
"verbose_name": "Secret",
"verbose_name_plural": "Secrets",
},
),
]
| 29.061224
| 84
| 0.429775
|
ebd59a92c29c4ba6470cb4121af9687049e14a73
| 74
|
py
|
Python
|
src/ga/__init__.py
|
yushikmr/OptModels
|
b82f1c8f80186c4617f681cca013b3dab03024fd
|
[
"MIT"
] | null | null | null |
src/ga/__init__.py
|
yushikmr/OptModels
|
b82f1c8f80186c4617f681cca013b3dab03024fd
|
[
"MIT"
] | null | null | null |
src/ga/__init__.py
|
yushikmr/OptModels
|
b82f1c8f80186c4617f681cca013b3dab03024fd
|
[
"MIT"
] | null | null | null |
from .algorithm import *
from .construct import *
from .operators import *
| 24.666667
| 24
| 0.77027
|
41e0bed44e76a24a13e888d27f01838624e8c5d3
| 13,367
|
py
|
Python
|
pyxb/namespace/builtin.py
|
thorstenb/pyxb
|
634e86f61dfb73a2900f32fc3d819e9c25365a49
|
[
"Apache-2.0"
] | null | null | null |
pyxb/namespace/builtin.py
|
thorstenb/pyxb
|
634e86f61dfb73a2900f32fc3d819e9c25365a49
|
[
"Apache-2.0"
] | null | null | null |
pyxb/namespace/builtin.py
|
thorstenb/pyxb
|
634e86f61dfb73a2900f32fc3d819e9c25365a49
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Classes and global objects related to built-in U{XML Namespaces<http://www.w3.org/TR/2006/REC-xml-names-20060816/index.html>}."""
import pyxb
# A unique identifier for components that are built-in to the PyXB system
BuiltInObjectUID = pyxb.utils.utility.UniqueIdentifier('PyXB-' + pyxb.__version__ + '-Builtin')
from pyxb.namespace import Namespace
class _XMLSchema_instance (Namespace):
"""Extension of L{Namespace} that pre-defines components available in the
XMLSchema Instance namespace."""
PT_strict = 'strict'
"""xsi:type is validated and supersedes the declared type. If no xsi:type is
present, the declared element type will be used. If xsi:type is
present, it must resolve to a valid type. The resolved type must be
a subclass of the declared type (if available), and will be used
for the binding."""
PT_lax = 'lax'
"""xsi:type supersedes the declared type without validation. If
no xsi:type is present, or it is present and fails to resolve to a
type, the declared element type will be used. If xsi:type is
present and resolves to a valid type, that type will be used for the
binding, even if it is not a subclass of the declared type."""
PT_skip = 'skip'
"""xsi:type attributes are ignored. The declared element type
will be used."""
__processType = PT_strict
type = None
"""An expanded name for {http://www.w3.org/2001/XMLSchema-instance}type."""
nil = None
"""An expanded name for {http://www.w3.org/2001/XMLSchema-instance}nil."""
def __init__ (self, *args, **kw):
super(_XMLSchema_instance, self).__init__(*args, **kw)
self.type = self.createExpandedName('type')
self.nil = self.createExpandedName('nil')
# NB: Because Namespace instances are singletons, I've made this an
# instance method even though it looks and behaves like a class method.
def ProcessTypeAttribute (self, value=None):
"""Specify how PyXB should interpret xsi:type attributes when
converting a document to a binding instance.
The default value is L{PT_strict}.
xsi:type should only be provided when using an abstract class,
or a concrete class that happens to be the same as the
xsi:type value, or when accepting a wildcard that has an
unrecognized element name. In practice, web services tend to
set it on nodes just to inform their lax-processing clients
how to interpret the value.
@param value: One of L{PT_strict}, L{PT_lax}, L{PT_skip}, or C{None} (no change)
@return: The current configuration for processing xsi:type attributes
"""
if value in (self.PT_strict, self.PT_lax, self.PT_skip):
self.__processType = value
elif value is not None:
raise pyxb.ValueError(value)
return self.__processType
def _InterpretTypeAttribute (self, type_name, ns_ctx, fallback_namespace, type_class):
"""Interpret the value of an xsi:type attribute as configured.
@param type_name: The QName value from the attribute
@param ns_ctx: The NamespaceContext within which the type_name should be resolved
@param fallback_namespace: The namespace that should be used if the type name has no prefix
@param type_class: The value to return if the type name is missing or acceptably invalid
@raises L{pyxb.BadDocumentError}: if the processing type
configuration is L{PT_strict} and the type name fails to
resolve to a type definition that is consistent with any
provided type_class.
"""
did_replace = False
if type_name is None:
return (did_replace, type_class)
pt = self.__processType
if self.PT_skip == pt:
return (did_replace, type_class)
type_en = ns_ctx.interpretQName(type_name, namespace=fallback_namespace)
try:
alternative_type_class = type_en.typeBinding()
except KeyError:
alternative_type_class = None
if self.PT_strict == pt:
if alternative_type_class is None:
raise pyxb.BadDocumentError('No type binding for %s' % (type_name,))
if (type_class is not None) and (not issubclass(alternative_type_class, type_class)):
raise pyxb.BadDocumentError('%s value %s is not subclass of element type %s' % (type_name, type_en, type_class._ExpandedName))
if (self.PT_strict == pt) or ((self.PT_lax == pt) and (alternative_type_class is not None)):
type_class = alternative_type_class
did_replace = True
return (did_replace, type_class)
def _defineBuiltins_ox (self, structures_module):
"""Ensure this namespace is ready for use.
Overrides base class implementation, since there is no schema
for this namespace. """
assert structures_module is not None
schema = structures_module.Schema(namespace_context=self.initialNamespaceContext(), schema_location="URN:noLocation:PyXB:xsi", generation_uid=BuiltInObjectUID, _bypass_preload=True)
type = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('type', schema))
nil = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('nil', schema))
schema_location = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('schemaLocation', schema))
no_namespace_schema_location = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('noNamespaceSchemaLocation', schema))
return self
class _XML (Namespace):
"""Extension of L{Namespace} that pre-defines components available in the
XML (xml) namespace. Specifically those are the attribute declarations:
- C{xml:space}
- C{xml:lang}
- C{xml:base}
- C{xml:id}
the encompassing attribute group declaration:
- C{xml:specialAttrs}
and the anonymous types that support these."""
def _defineBuiltins_ox (self, structures_module):
"""Ensure this namespace is ready for use.
Overrides base class implementation, since there is no schema
for this namespace. """
assert structures_module is not None
import pyxb.binding.datatypes as xsd
import pyxb.binding.facets as xsdf
import archive
self.configureCategories([archive.NamespaceArchive._AnonymousCategory()])
schema = structures_module.Schema(namespace_context=self.initialNamespaceContext(), schema_location="URN:noLocation:PyXB:XML", generation_uid=BuiltInObjectUID, _bypass_preload=True)
std_space = structures_module.SimpleTypeDefinition._CreateXMLInstance('space', schema)
std_space._setAnonymousName(self, anon_name='STD_ANON_space')
std_space._setBindingNamespace(self)
std_lang = structures_module.SimpleTypeDefinition._CreateXMLInstance('lang', schema)
std_lang._setAnonymousName(self, anon_name='STD_ANON_lang')
std_lang._setBindingNamespace(self)
base = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('base', schema, std=xsd.anyURI.SimpleTypeDefinition()))
id = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('id', schema, std=xsd.ID.SimpleTypeDefinition()))
space = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('space', schema, std=std_space))
lang = schema._addNamedComponent(structures_module.AttributeDeclaration.CreateBaseInstance('lang', schema, std=std_lang))
specialAttrs = schema._addNamedComponent(structures_module.AttributeGroupDefinition.CreateBaseInstance('specialAttrs', schema, [
structures_module.AttributeUse.CreateBaseInstance(schema, space),
structures_module.AttributeUse.CreateBaseInstance(schema, base),
structures_module.AttributeUse.CreateBaseInstance(schema, lang),
structures_module.AttributeUse.CreateBaseInstance(schema, id),
]))
return self
class _XMLSchema (Namespace):
"""Extension of L{Namespace} that pre-defines components available in the
XMLSchema namespace.
The types are defined when L{pyxb.xmlschema.structures} is imported.
"""
def _defineBuiltins_ox (self, structures_module):
"""Register the built-in types into the XMLSchema namespace."""
# Defer the definitions to the structures module
assert structures_module is not None
structures_module._AddSimpleTypes(self)
# A little validation here
assert structures_module.ComplexTypeDefinition.UrTypeDefinition() == self.typeDefinitions()['anyType']
assert structures_module.SimpleTypeDefinition.SimpleUrTypeDefinition() == self.typeDefinitions()['anySimpleType']
# Provide access to the binding classes
self.configureCategories(['typeBinding', 'elementBinding'])
for ( en, td ) in self.typeDefinitions().items():
if td.pythonSupport() is not None:
self.addCategoryObject('typeBinding', en, td.pythonSupport())
XMLSchema_instance = _XMLSchema_instance('http://www.w3.org/2001/XMLSchema-instance',
description='XML Schema Instance',
builtin_namespace='XMLSchema_instance')
"""Namespace and URI for the XMLSchema Instance namespace. This is always
built-in, and does not (cannot) have an associated schema."""
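# A minimal sketch of flipping the xsi:type processing mode documented in
# _XMLSchema_instance above; the call below is an assumed typical use, not
# something this module performs itself:
#   XMLSchema_instance.ProcessTypeAttribute(_XMLSchema_instance.PT_lax)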
XMLNamespaces = Namespace('http://www.w3.org/2000/xmlns/',
description='Namespaces in XML',
builtin_namespace='XMLNamespaces',
bound_prefix='xmlns')
"""Namespaces in XML. Not really a namespace, but is always available as C{xmlns}."""
# http://www.w3.org/2001/XMLSchema.xsd
XMLSchema = _XMLSchema('http://www.w3.org/2001/XMLSchema',
description='XML Schema',
builtin_namespace='XMLSchema',
builtin_module_path='pyxb.binding.datatypes',
in_scope_namespaces = { 'xs' : None })
"""Namespace and URI for the XMLSchema namespace (often C{xs}, or C{xsd})"""
# http://www.w3.org/1999/xhtml.xsd
XHTML = Namespace('http://www.w3.org/1999/xhtml',
description='Family of document types that extend HTML',
builtin_namespace='XHTML',
default_namespace=XMLSchema)
"""There really isn't a schema for this, but it's used as the default
namespace in the XML schema, so define it."""
# http://www.w3.org/2001/xml.xsd
XML = _XML('http://www.w3.org/XML/1998/namespace',
description='XML namespace',
builtin_namespace='XML',
builtin_module_path='pyxb.binding.xml_',
is_undeclared_namespace=True,
bound_prefix='xml',
default_namespace=XHTML,
in_scope_namespaces = { 'xs' : XMLSchema })
"""Namespace and URI for XML itself (always available as C{xml})"""
# http://www.w3.org/2001/XMLSchema-hasFacetAndProperty
XMLSchema_hfp = Namespace('http://www.w3.org/2001/XMLSchema-hasFacetAndProperty',
description='Facets appearing in appinfo section',
builtin_namespace='XMLSchema_hfp',
default_namespace=XMLSchema,
in_scope_namespaces = { 'hfp' : None
, 'xhtml' : XHTML })
"""Elements appearing in appinfo elements to support data types."""
# List of built-in namespaces.
BuiltInNamespaces = [
XMLSchema_instance,
XMLSchema_hfp,
XMLSchema,
XMLNamespaces,
XML,
XHTML
]
__InitializedBuiltinNamespaces = False
def _InitializeBuiltinNamespaces (structures_module):
"""Invoked at the end of the L{pyxb.xmlschema.structures} module to
initialize the component models of the built-in namespaces.
@param structures_module: The L{pyxb.xmlschema.structures} module may not
be importable by that name at the time this is invoked (because it is
still being processed), so it gets passed in as a parameter."""
global __InitializedBuiltinNamespaces
if not __InitializedBuiltinNamespaces:
__InitializedBuiltinNamespaces = True
[ _ns._defineBuiltins(structures_module) for _ns in BuiltInNamespaces ]
# Set up the prefixes for xml, xmlns, etc.
_UndeclaredNamespaceMap = { }
[ _UndeclaredNamespaceMap.setdefault(_ns.boundPrefix(), _ns) for _ns in BuiltInNamespaces if _ns.isUndeclaredNamespace() ]
| 47.400709
| 189
| 0.691629
|
c565df7d6caa3f041b072a2f142fb349f320b60b
| 22,000
|
py
|
Python
|
duckhunt.py
|
Grimnir9/cloudbot-stuff
|
5639e5e461d485db77bb87dfd7cfa6a241342d5b
|
[
"MIT"
] | null | null | null |
duckhunt.py
|
Grimnir9/cloudbot-stuff
|
5639e5e461d485db77bb87dfd7cfa6a241342d5b
|
[
"MIT"
] | null | null | null |
duckhunt.py
|
Grimnir9/cloudbot-stuff
|
5639e5e461d485db77bb87dfd7cfa6a241342d5b
|
[
"MIT"
] | null | null | null |
import random
import re
import operator
from time import time
from collections import defaultdict
from sqlalchemy import Table, Column, String, Integer, PrimaryKeyConstraint, desc
from sqlalchemy.sql import select
from cloudbot import hook
from cloudbot.event import EventType
from cloudbot.util import botvars
duck_tail = ["・゜゜・。。・゜゜", "。・゜゜・゜゜・。", "。。。。・゜゜゜゜・・"]
duck = ["\_o< ", "\_O< ", "\_0< ", "\_\u00f6< ", "\_\u00f8< ", "\_\u00f3< "]
duck_noise = ["QUACK!", "FLAP FLAP!", "quack!", "flap flap!", "QUACK QUACK!", "You're Despicable", "Go on! Shoot me again! I enjoy it!", "ALACAZAM!", "duck hunters is da cwaziest peoples!", "I'm Duck Twacy!", "Of course you realize, this means war!", "I'm The Hans Duck!"]
table = Table(
'duck_hunt',
botvars.metadata,
Column('network', String),
Column('name', String),
Column('shot', Integer),
Column('befriend', Integer),
Column('chan', String),
PrimaryKeyConstraint('name', 'chan','network')
)
optout = Table(
'nohunt',
botvars.metadata,
Column('network', String),
Column('chan', String),
PrimaryKeyConstraint('chan','network')
)
"""
game_status structure
{
'network':{
'#chan1':{
'duck_status':0|1|2,
'next_duck_time':'integer',
'game_started':0|1,
'no_duck_kick': 0|1,
'duck_time': 'float',
'shoot_time': 'float'
}
}
}
"""
scripters = defaultdict(int)
game_status = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
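# Illustrative lookup into the structure sketched above (network and channel
# names are made up): game_status['freenode']['#ducks']['duck_status'] is 1
# while a duck is on the loose and 2 once it has been shot or befriended.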
@hook.on_start()
def load_optout(db):
"""load a list of channels duckhunt should be off in. Right now I am being lazy and not
differentiating between networks; this should be cleaned up later."""
global opt_out
opt_out = []
chans = db.execute(select([optout.c.chan]))
if chans:
for row in chans:
chan = row["chan"]
opt_out.append(chan)
@hook.command("starthunt", autohelp=False)
def start_hunt(bot, chan, message, conn):
"""This command starts a duckhunt in your channel; to stop the hunt use .stophunt"""
global game_status
if chan in opt_out:
return
elif not chan.startswith("#"):
return "If you're wanting some 'me' time that's cool but there's no hunting by yourself."
check = game_status[conn.name][chan]['game_on']
if check:
return "there is already a game running in {}.".format(chan)
else:
game_status[conn.name][chan]['game_on'] = 1
set_ducktime(chan, conn)
message("Ducks are migrating, to shoot ducks use .bang|.pew|.boom, use .befriend|.bef to save them.", chan)
def set_ducktime(chan, conn):
global game_status
# game_status[conn.name][chan]['next_duck_time'] = random.randint(int(time()) + 3600, int(time()) + 7200)
game_status[conn.name][chan]['next_duck_time'] = random.randint(int(time()) + 1800, int(time()) + 5400)
game_status[conn.name][chan]['flyaway'] = game_status[conn.name][chan]['next_duck_time'] + 600
game_status[conn.name][chan]['duck_status'] = 0
return
@hook.command("stophunt", autohelp=False)
def stop_hunt(chan, conn):
"""This command stops the duck hunt in your channel. Scores will be preserved"""
global game_status
if chan in opt_out:
return
if game_status[conn.name][chan]['game_on']:
game_status[conn.name][chan]['game_on'] = 0
return "the game has been stopped."
else:
return "There is no game running in {}.".format(chan)
@hook.command("duckkick", permissions=["op"])
def no_duck_kick(text, chan, conn, notice):
"""If the bot has OP or half-op in the channel you can specify .duckkick enable|disable so that people are kicked for shooting or befriending a non-existent goose. Default is off."""
global game_status
if chan in opt_out:
return
if text.lower() == 'enable':
game_status[conn.name][chan]['no_duck_kick'] = 1
return "users will now be kicked for shooting or befriending non-existent ducks. The bot needs to have appropriate flags to be able to kick users for this to work."
elif text.lower() == 'disable':
game_status[conn.name][chan]['no_duck_kick'] = 0
return "kicking for non-existent ducks has been disabled."
else:
notice(no_duck_kick.__doc__)
return
def generate_duck():
"""Try and randomize the duck message so people can't highlight on it/script against it."""
#rt = random.randint(1, len(duck_tail) - 1)
#dtail = duck_tail[:rt] + duck_tail[rt:]
dtail = random.choice(duck_tail)
dbody = random.choice(duck)
#rb = random.randint(1, len(dbody) - 1)
#dbody = dbody[:rb] + dbody[rb:]
dnoise = random.choice(duck_noise)
#rn = random.randint(1, len(dnoise) - 1)
#dnoise = dnoise[:rn] + dnoise[rn:]
return (dtail, dbody, dnoise)
@hook.periodic(11, initial_interval=11)
def deploy_duck(message, bot):
global game_status
for network in game_status:
if network not in bot.connections:
continue
conn = bot.connections[network]
if not conn.ready:
continue
for chan in game_status[network]:
active = game_status[network][chan]['game_on']
duck_status = game_status[network][chan]['duck_status']
next_duck = game_status[network][chan]['next_duck_time']
if active == 1 and duck_status == 0 and next_duck <= time():
#deploy a duck to channel
game_status[network][chan]['duck_status'] = 1
game_status[network][chan]['duck_time'] = time()
dtail, dbody, dnoise = generate_duck()
conn.message(chan, "{}{}{}".format(dtail, dbody, dnoise))
# Leave this commented out for now. I haven't decided how to make ducks leave.
#if active == 1 and duck_status == 1 and game_status[network][chan]['flyaway'] <= int(time()):
# conn.message(chan, "The duck flew away.")
# game_status[network][chan]['duck_status'] = 2
# set_ducktime(chan, conn)
continue
continue
def hit_or_miss(deploy, shoot):
"""This function calculates if the befriend or bang will be successful."""
if shoot - deploy < 1:
return .05
elif 1 <= shoot - deploy <= 7:
out = random.uniform(.60, .75)
return out
else:
return 1
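# Worked examples for the bands above (timestamps are illustrative):
#   hit_or_miss(100.0, 100.4) -> 0.05                  # under 1s looks scripted
#   hit_or_miss(100.0, 104.0) -> uniform in [.60, .75] # the 1-7s window
#   hit_or_miss(100.0, 110.0) -> 1                     # slower reactions always land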
def dbadd_entry(nick, chan, db, conn, shoot, friend):
"""Takes care of adding a new row to the database."""
query = table.insert().values(
network = conn.name,
chan = chan.lower(),
name = nick.lower(),
shot = shoot,
befriend = friend)
db.execute(query)
db.commit()
def dbupdate(nick, chan, db, conn, shoot, friend):
"""update a db row"""
if shoot and not friend:
query = table.update() \
.where(table.c.network == conn.name) \
.where(table.c.chan == chan.lower()) \
.where(table.c.name == nick.lower()) \
.values(shot = shoot)
db.execute(query)
db.commit()
elif friend and not shoot:
query = table.update() \
.where(table.c.network == conn.name) \
.where(table.c.chan == chan.lower()) \
.where(table.c.name == nick.lower()) \
.values(befriend = friend)
db.execute(query)
db.commit()
@hook.command("bang", "pew", "boom", autohelp=False)
def bang(nick, chan, message, db, conn, notice):
"""when there is a duck on the loose use this command to shoot it."""
global game_status, scripters
if chan in opt_out:
return
network = conn.name
score = ""
out = ""
miss = ["You just shot yourself in the foot, the duck laughed at you as it flew off.", "WHOOSH! You missed the duck completely!", "Your gun jammed!", "Better luck next time.", "Your barrel must be bent lol, maybe next time!", "Clearly you're using a BB gun, get a real gun and try again!", "Did you just throw a firecracker? Go buy a shotgun and come back!","Wow, Could you be a worse shot?" ]
if not game_status[network][chan]['game_on']:
return "There is no active hunt right now. Use .starthunt to start a game."
elif game_status[network][chan]['duck_status'] != 1:
if game_status[network][chan]['no_duck_kick'] == 1:
out = "KICK {} {} The last duck was already nabbed, try again with the next duck.".format(chan, nick)
conn.send(out)
return
return "The last duck was already nabbed, try again with the next duck."
else:
game_status[network][chan]['shoot_time'] = time()
deploy = game_status[network][chan]['duck_time']
shoot = game_status[network][chan]['shoot_time']
if nick.lower() in scripters:
if scripters[nick.lower()] > shoot:
notice("You are in a cool down period, you can try again in {} seconds.".format(str(scripters[nick.lower()] - shoot)))
return
chance = hit_or_miss(deploy, shoot)
if not random.random() <= chance and chance > .05:
out = random.choice(miss) + " You can try again in 3 seconds."
scripters[nick.lower()] = shoot + 3
return out
if chance == .05:
out += "You pulled the trigger in {} seconds, that's mighty fast. Are you running a script for this game? Take a 2 hour cool down.".format(str(shoot - deploy))
scripters[nick.lower()] = shoot + 7200
if not random.random() <= chance:
return random.choice(miss) + " " + out
else:
message(out)
game_status[network][chan]['duck_status'] = 2
score = db.execute(select([table.c.shot]) \
.where(table.c.network == conn.name) \
.where(table.c.chan == chan.lower()) \
.where(table.c.name == nick.lower())).fetchone()
if score:
score = score[0]
score += 1
dbupdate(nick, chan, db, conn, score, 0)
else:
score = 1
dbadd_entry(nick, chan, db, conn, score, 0)
timer = "{:.3f}".format(shoot - deploy)
duck = "duck" if score == 1 else "ducks"
message("{} Perfect aim, you shot the duck in {} seconds! You have killed {} {} in {}.".format(nick, timer, score, duck, chan))
set_ducktime(chan, conn)
@hook.command("befriend", "bef", autohelp=False)
def befriend(nick, chan, message, db, conn, notice):
"""when there is a duck on the loose use this command to befriend it before someone else shoots it."""
global game_status, scripters
if chan in opt_out:
return
network = conn.name
out = ""
score = ""
miss = ["The duck flipped you off and waddled away.", "The duck farted in your general direction.", "Well this is odd, the duck doesn't want to be your friend.", "The duck said no, maybe bribe it with some mcdonald's fries?", "The duck didn't recognise you as a friend, maybe get a duck outfit? https://is.gd/duck_outfit", "The ducks seem to be out of crackers, crackers make friends right?"]
if not game_status[network][chan]['game_on']:
return "There is no hunt right now. Use .starthunt to start a game."
elif game_status[network][chan]['duck_status'] != 1:
if game_status[network][chan]['no_duck_kick'] == 1:
out = "KICK {} {} Pay attention, the duck is already gone!".format(chan, nick)
conn.send(out)
return
return "Pay attention, the duck is already gone!"
else:
game_status[network][chan]['shoot_time'] = time()
deploy = game_status[network][chan]['duck_time']
shoot = game_status[network][chan]['shoot_time']
if nick.lower() in scripters:
if scripters[nick.lower()] > shoot:
notice("You are in a cool down period, you can try again in {} seconds.".format(str(scripters[nick.lower()] - shoot)))
return
chance = hit_or_miss(deploy, shoot)
if not random.random() <= chance and chance > .05:
out = random.choice(miss) + " You can try again in 3 seconds."
scripters[nick.lower()] = shoot + 3
return out
if chance == .05:
out += "You tried friending that duck in {} seconds, that's mighty fast. Are you running a script for this game? Take a 2 hour cool down.".format(str(shoot - deploy))
scripters[nick.lower()] = shoot + 7200
if not random.random() <= chance:
return random.choice(miss) + " " + out
else:
message(out)
game_status[network][chan]['duck_status'] = 2
score = db.execute(select([table.c.befriend]) \
.where(table.c.network == conn.name) \
.where(table.c.chan == chan.lower()) \
.where(table.c.name == nick.lower())).fetchone()
if score:
score = score[0]
score += 1
dbupdate(nick, chan, db, conn, 0, score)
else:
score = 1
dbadd_entry(nick, chan, db, conn, 0, score)
duck = "duck" if score == 1 else "ducks"
timer = "{:.3f}".format(shoot - deploy)
message("{} You befriended a duck in {} seconds! You have made friends with {} {} in {}.".format(nick, timer, score, duck, chan))
set_ducktime(chan,conn)
def smart_truncate(content, length=320, suffix='...'):
if len(content) <= length:
return content
else:
return content[:length].rsplit(' • ', 1)[0]+suffix
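# Worked example for the truncation above (strings are illustrative): the
# partially cut final " • " entry is dropped, so
#   smart_truncate("a • b • c", length=5) == "a..."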
@hook.command("friends", autohelp=False)
def friends(text, chan, conn, db):
"""Prints a list of the top duck friends in the channel, if 'global' is specified all channels in the database are included."""
if chan in opt_out:
return
friends = defaultdict(int)
out = ""
if text.lower() == 'global':
out = "Duck friend scores across the network: "
scores = db.execute(select([table.c.name, table.c.befriend]) \
.where(table.c.network == conn.name) \
.order_by(desc(table.c.befriend)))
if scores:
for row in scores:
if row[1] == 0:
continue
friends[row[0]] += row[1]
else:
return "it appears no one has friended any ducks yet."
else:
out = "Duck friend scores in {}: ".format(chan)
scores = db.execute(select([table.c.name, table.c.befriend]) \
.where(table.c.network == conn.name) \
.where(table.c.chan == chan.lower()) \
.order_by(desc(table.c.befriend)))
if scores:
for row in scores:
if row[1] == 0:
continue
friends[row[0]] += row[1]
else:
return "it appears no one has friended any ducks yet."
topfriends = sorted(friends.items(), key=operator.itemgetter(1), reverse = True)
out += ' • '.join(["{}: {}".format('\x02' + k[:1] + u'\u200b' + k[1:] + '\x02', str(v)) for k, v in topfriends])
out = smart_truncate(out)
return out
@hook.command("killers", autohelp=False)
def killers(text, chan, conn, db):
"""Prints a list of the top duck killers in the channel, if 'global' is specified all channels in the database are included."""
if chan in opt_out:
return
killers = defaultdict(int)
out = ""
if text.lower() == 'global':
out = "Duck killer scores across the network: "
scores = db.execute(select([table.c.name, table.c.shot]) \
.where(table.c.network == conn.name) \
.order_by(desc(table.c.shot)))
if scores:
for row in scores:
if row[1] == 0:
continue
killers[row[0]] += row[1]
else:
return "it appears no one has killed any ducks yet."
else:
out = "Duck killer scores in {}: ".format(chan)
scores = db.execute(select([table.c.name, table.c.shot]) \
.where(table.c.network == conn.name) \
.where(table.c.chan == chan.lower()) \
.order_by(desc(table.c.shot)))
if scores:
for row in scores:
if row[1] == 0:
continue
killers[row[0]] += row[1]
else:
return "it appears no one has killed any ducks yet."
topkillers = sorted(killers.items(), key=operator.itemgetter(1), reverse = True)
out += ' • '.join(["{}: {}".format('\x02' + k[:1] + u'\u200b' + k[1:] + '\x02', str(v)) for k, v in topkillers])
out = smart_truncate(out)
return out
@hook.command("duckforgive", permissions=["op", "ignore"])
def duckforgive(text):
"""Allows people to be removed from the mandatory cooldown period."""
global scripters
if text.lower() in scripters and scripters[text.lower()] > time():
scripters[text.lower()] = 0
return "{} has been removed from the mandatory cooldown period.".format(text)
else:
return "I couldn't find anyone banned from the hunt by that nick."
@hook.command("hunt_opt_out", permissions=["op", "ignore"], autohelp=False)
def hunt_opt_out(text, chan, db, conn):
"""Running this command without any arguments displays the status of the current channel. hunt_opt_out add #channel will disable all duck hunt commands in the specified channel. hunt_opt_out remove #channel will re-enable the game for the specified channel."""
if not text:
if chan in opt_out:
return "Duck hunt is disabled in {}. To re-enable it run .hunt_opt_out remove #channel".format(chan)
else:
return "Duck hunt is enabled in {}. To disable it run .hunt_opt_out add #channel".format(chan)
if text == "list":
return ", ".join(opt_out)
if len(text.split(' ')) < 2:
return "please specify add or remove and a valid channel name"
command = text.split()[0]
channel = text.split()[1]
if not channel.startswith('#'):
return "Please specify a valid channel."
if command.lower() == "add":
if channel in opt_out:
return "Duck hunt has already been disabled in {}.".format(channel)
query = optout.insert().values(
network = conn.name,
chan = channel.lower())
db.execute(query)
db.commit()
load_optout(db)
if command.lower() == "remove":
if channel not in opt_out:
return "Duck hunt is already enabled in {}.".format(channel)
delete = optout.delete(optout.c.chan == channel.lower())
db.execute(delete)
db.commit()
load_optout(db)
@hook.command("ducks", autohelp=False)
def ducks_user(text, nick, chan, conn, db, message):
"""Prints a user's duck stats. If no nick is given it will check the calling username."""
name = nick.lower()
if text:
name = text.split()[0].lower()
ducks = defaultdict(int)
scores = db.execute(select([table.c.name, table.c.chan, table.c.shot, table.c.befriend])
.where(table.c.network == conn.name)
.where(table.c.name == name)).fetchall()
if scores:
for row in scores:
if row["chan"].lower() == chan.lower():
ducks["chankilled"] += row["shot"]
ducks["chanfriends"] += row["befriend"]
ducks["killed"] += row["shot"]
ducks["friend"] += row["befriend"]
ducks["chans"] += 1
if ducks["chans"] == 1:
message("{} has killed {} and befriended {} ducks in {}.".format(name, ducks["chankilled"], ducks["chanfriends"], chan))
return
kill_average = int(ducks["killed"] / ducks["chans"])
friend_average = int(ducks["friend"] / ducks["chans"])
message("\x02{}'s\x02 duck stats: \x02{}\x02 killed and \x02{}\x02 befriended in {}. Across {} channels: \x02{}\x02 killed and \x02{}\x02 befriended. Averaging \x02{}\x02 kills and \x02{}\x02 friends per channel.".format(name, ducks["chankilled"], ducks["chanfriends"], chan, ducks["chans"], ducks["killed"], ducks["friend"], kill_average, friend_average))
else:
return "It appears {} has not participated in the duck hunt.".format(name)
@hook.command("duckstats", autohelp=False)
def duck_stats(chan, conn, db, message):
"""Prints duck statistics for the entire channel and totals for the network."""
ducks = defaultdict(int)
scores = db.execute(select([table.c.name, table.c.chan, table.c.shot, table.c.befriend])
.where(table.c.network == conn.name)).fetchall()
if scores:
ducks["friendchan"] = defaultdict(int)
ducks["killchan"] = defaultdict(int)
for row in scores:
ducks["friendchan"][row["chan"]] += row["befriend"]
ducks["killchan"][row["chan"]] += row["shot"]
#ducks["chans"] += 1
if row["chan"].lower() == chan.lower():
ducks["chankilled"] += row["shot"]
ducks["chanfriends"] += row["befriend"]
ducks["killed"] += row["shot"]
ducks["friend"] += row["befriend"]
ducks["chans"] = int((len(ducks["friendchan"]) + len(ducks["killchan"])) / 2)
killerchan, killscore = sorted(ducks["killchan"].items(), key=operator.itemgetter(1), reverse = True)[0]
friendchan, friendscore = sorted(ducks["friendchan"].items(), key=operator.itemgetter(1), reverse =True)[0]
message("\x02Duck Stats:\x02 {} killed and {} befriended in \x02{}\x02. Across {} channels \x02{}\x02 ducks have been killed and \x02{}\x02 befriended. \x02Top Channels:\x02 \x02{}\x02 with {} kills and \x02{}\x02 with {} friends".format(ducks["chankilled"], ducks["chanfriends"], chan, ducks["chans"], ducks["killed"], ducks["friend"], killerchan, killscore, friendchan, friendscore))
else:
return "It looks like there has been no duck activity on this channel or network."
| 44.715447
| 397
| 0.598818
|
8dd2651427c62a5c0c62838eca8d436262515db7
| 422
|
py
|
Python
|
tests/test_misc.py
|
vug/personalwebapp
|
6e36010c2e031fc206d8a0ae3972686cfbe792d4
|
[
"MIT"
] | null | null | null |
tests/test_misc.py
|
vug/personalwebapp
|
6e36010c2e031fc206d8a0ae3972686cfbe792d4
|
[
"MIT"
] | null | null | null |
tests/test_misc.py
|
vug/personalwebapp
|
6e36010c2e031fc206d8a0ae3972686cfbe792d4
|
[
"MIT"
] | null | null | null |
import unittest
from .base import TestBase
class TestMisc(TestBase):
def test_user_model(self):
user = self.get_test_user()
assert user.is_authenticated()
assert user.is_active()
assert not user.is_anonymous()
def test_test_user(self):
user = self.get_test_user()
assert user.email == self.test_user_email
assert user.password == self.test_user_password
| 24.823529
| 55
| 0.677725
|
9441617692b4780185ea9939d8cb40d3199c18e0
| 2,328
|
py
|
Python
|
code/train/00_samples.py
|
data-intelligence-for-health-lab/delirium_prediction
|
a0a25819ef6c98e32563b4e3b986c1a26fc30ed7
|
[
"MIT"
] | null | null | null |
code/train/00_samples.py
|
data-intelligence-for-health-lab/delirium_prediction
|
a0a25819ef6c98e32563b4e3b986c1a26fc30ed7
|
[
"MIT"
] | null | null | null |
code/train/00_samples.py
|
data-intelligence-for-health-lab/delirium_prediction
|
a0a25819ef6c98e32563b4e3b986c1a26fc30ed7
|
[
"MIT"
] | null | null | null |
# --- loading libraries -------------------------------------------------------
import os
import pandas as pd
# ------------------------------------------------------ loading libraries ----
# --- main routine ------------------------------------------------------------
# Loading train and validation datasets
train = pd.read_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/master_train.pickle', compression = 'zip')
validation = pd.read_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/master_validation.pickle', compression = 'zip')
calibration = pd.read_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/master_calibration.pickle', compression = 'zip')
test = pd.read_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/master_test.pickle', compression = 'zip')
train_200_ids = sorted(list(train['ADMISSION_ID'].unique()))[:200]
validation_200_ids = sorted(list(validation['ADMISSION_ID'].unique()))[:200]
calibration_200_ids = sorted(list(calibration['ADMISSION_ID'].unique()))[:200]
test_200_ids = sorted(list(test['ADMISSION_ID'].unique()))[:200]
sample_train = train[train['ADMISSION_ID'].isin(train_200_ids)].reset_index(drop = True)
sample_validation = validation[validation['ADMISSION_ID'].isin(validation_200_ids)].reset_index(drop = True)
sample_calibration = calibration[calibration['ADMISSION_ID'].isin(calibration_200_ids)].reset_index(drop = True)
sample_test = test[test['ADMISSION_ID'].isin(test_200_ids)].reset_index(drop = True)
if not os.path.exists('/project/M-ABeICU176709/delirium/data/inputs/master/sample'):
os.mkdir( '/project/M-ABeICU176709/delirium/data/inputs/master/sample')
sample_train.to_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/sample/sample_train.pickle', compression = 'zip', protocol = 4)
sample_validation.to_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/sample/sample_validation.pickle', compression = 'zip', protocol = 4)
sample_calibration.to_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/sample/sample_calibration.pickle', compression = 'zip', protocol = 4)
sample_test.to_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/sample/sample_test.pickle', compression = 'zip', protocol = 4)
# ------------------------------------------------------------ main routine ---
| 68.470588
| 151
| 0.703179
|
ebf938aa541c986d0089382f0cbbdd429f0a1c50
| 2,691
|
py
|
Python
|
citadel/indico_citadel/cli.py
|
tomasr8/indico-plugins
|
b85e4ad826fa362aa32eb236e73c9ab2f7c7f465
|
[
"MIT"
] | null | null | null |
citadel/indico_citadel/cli.py
|
tomasr8/indico-plugins
|
b85e4ad826fa362aa32eb236e73c9ab2f7c7f465
|
[
"MIT"
] | null | null | null |
citadel/indico_citadel/cli.py
|
tomasr8/indico-plugins
|
b85e4ad826fa362aa32eb236e73c9ab2f7c7f465
|
[
"MIT"
] | null | null | null |
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import os
import sys
import time
import traceback
import click
from indico.cli.core import cli_group
from indico.core.db import db
from indico.util.console import cformat
from indico_citadel.models.id_map import CitadelIdMap
from indico_livesync.models.agents import LiveSyncAgent
@cli_group(name='citadel')
def cli():
"""Manage the Citadel plugin."""
@cli.command()
@click.option('--force', '-f', is_flag=True, help="Upload even if it has already been done once.")
@click.option('--retry', '-r', is_flag=True, help="Restart automatically after a failure")
@click.option('--batch', type=int, default=1000, show_default=True, metavar='N',
help="The amount of records yielded per upload batch.")
@click.option('--max-size', type=int, metavar='SIZE',
help="The max size (in MB) of files to upload. Defaults to the size from the plugin settings.")
def upload(batch, force, max_size, retry):
"""Upload file contents for full text search."""
agent = LiveSyncAgent.query.filter(LiveSyncAgent.backend_name == 'citadel').first()
if agent is None:
print('No citadel livesync agent found')
return
if not CitadelIdMap.query.has_rows():
print('It looks like you did not export any data to Citadel yet.')
print(cformat('To do so, run %{yellow!}indico livesync initial-export {}%{reset}').format(agent.id))
return
backend = agent.create_backend()
if not backend.is_configured():
print('Citadel is not properly configured.')
return
initial = not agent.settings.get('file_upload_done')
try:
total, errors, aborted = backend.run_export_files(batch, force, max_size=max_size, initial=initial)
except Exception:
if not retry:
raise
traceback.print_exc()
print('Restarting in 2 seconds\a')
time.sleep(2)
os.execl(sys.argv[0], *sys.argv)
return # exec doesn't return but just in case...
if not errors and not aborted:
print(f'{total} files uploaded')
if max_size is None:
backend.set_initial_file_upload_state(True)
db.session.commit()
else:
print('Max size was set; not enabling queue runs.')
else:
if aborted:
print('Upload aborted')
print(f'{total} files processed, {errors} failed')
print('Please re-run this script; queue runs will remain disabled for now')
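# Illustrative invocation, assuming the plugin is installed and its CLI group is
# exposed through the main Indico command (the exact entry point is an assumption):
#   indico citadel upload --batch 500 --retry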
| 35.88
| 109
| 0.672612
|
50d45deb89578fe325acc61eb61f6d62c4ea8667
| 13,877
|
py
|
Python
|
doc/ext/traffic-server.py
|
walkerfly/apache-trafficserver
|
c7cfa081ccc5c0b875a8ee665cde3399c48abd5c
|
[
"Apache-2.0"
] | null | null | null |
doc/ext/traffic-server.py
|
walkerfly/apache-trafficserver
|
c7cfa081ccc5c0b875a8ee665cde3399c48abd5c
|
[
"Apache-2.0"
] | null | null | null |
doc/ext/traffic-server.py
|
walkerfly/apache-trafficserver
|
c7cfa081ccc5c0b875a8ee665cde3399c48abd5c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TS Sphinx Directives
~~~~~~~~~~~~~~~~~~~~~~~~~
Sphinx Docs directives for Apache Traffic Server
:copyright: Copyright 2013 by the Apache Software Foundation
:license: Apache
"""
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives
from sphinx.domains import Domain, ObjType, std
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
import sphinx
class TSConfVar(std.Target):
"""
Description of a traffic server configuration variable.
Argument is the variable as defined in records.config.
Descriptive text should follow, indented.
Then the bulk description (if any) undented. This should be considered equivalent to the Doxygen
short and long description.
"""
option_spec = {
'class' : rst.directives.class_option,
'reloadable' : rst.directives.flag,
'deprecated' : rst.directives.flag,
'overridable' : rst.directives.flag,
'metric' : rst.directives.unchanged,
}
required_arguments = 3
optional_arguments = 1 # default is optional, special case if omitted
final_argument_whitespace = True
has_content = True
def make_field(self, tag, value):
field = nodes.field()
field.append(nodes.field_name(text=tag))
body = nodes.field_body()
if (isinstance(value, basestring)):
body.append(sphinx.addnodes.compact_paragraph(text=value))
else:
body.append(value)
field.append(body)
return field
# External entry point
def run(self):
env = self.state.document.settings.env
cv_default = None
cv_scope, cv_name, cv_type = self.arguments[0:3]
if (len(self.arguments) > 3):
cv_default = self.arguments[3]
# First, make a generic desc() node to be the parent.
node = sphinx.addnodes.desc()
node.document = self.state.document
node['objtype'] = 'cv'
# Next, make a signature node. This creates a permalink and a
# highlighted background when the link is selected.
title = sphinx.addnodes.desc_signature(cv_name, '')
title['ids'].append(nodes.make_id(cv_name))
title['ids'].append(cv_name)
title['names'].append(cv_name)
title['first'] = False
title['objtype'] = 'cv'
self.add_name(title)
title.set_class('ts-cv-title')
# Finally, add a desc_name() node to display the name of the
# configuration variable.
title += sphinx.addnodes.desc_name(cv_name, cv_name)
node.append(title)
if ('class' in self.options):
title.set_class(self.options.get('class'))
# This has to be a distinct node before the title. if nested then
# the browser will scroll forward to just past the title.
anchor = nodes.target('', '', names=[cv_name])
# Second (optional) arg is 'msgNode' - no idea what I should pass for that
# or if it even matters, although I now think it should not be used.
self.state.document.note_explicit_target(title)
env.domaindata['ts']['cv'][cv_name] = env.docname
fl = nodes.field_list()
fl.append(self.make_field('Scope', cv_scope))
fl.append(self.make_field('Type', cv_type))
if (cv_default):
fl.append(self.make_field('Default', cv_default))
else:
fl.append(self.make_field('Default', sphinx.addnodes.literal_emphasis(text='*NONE*')))
if ('metric' in self.options):
fl.append(self.make_field('Metric', self.options['metric']))
if ('reloadable' in self.options):
fl.append(self.make_field('Reloadable', 'Yes'))
if ('overridable' in self.options):
fl.append(self.make_field('Overridable', 'Yes'))
if ('deprecated' in self.options):
fl.append(self.make_field('Deprecated', 'Yes'))
# Get any contained content
nn = nodes.compound()
self.state.nested_parse(self.content, self.content_offset, nn)
# Create an index node so that Sphinx adds this config variable to the
# index. nodes.make_id() specifies the link anchor name that is
# implicitly generated by the anchor node above.
indexnode = sphinx.addnodes.index(entries=[])
indexnode['entries'].append(
('single', _('%s') % cv_name, nodes.make_id(cv_name), '')
)
return [ indexnode, node, fl, nn ]
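# The directive defined above would appear in reStructuredText sources roughly
# as follows; the variable name, type and default are made up for illustration:
#   .. ts:cv:: CONFIG proxy.config.example.setting INT 0
#      :reloadable:
#
#      Short description of the configuration variable goes here.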
class TSConfVarRef(XRefRole):
def process_link(self, env, ref_node, explicit_title_p, title, target):
return title, target
def metrictypes(typename):
return directives.choice(typename.lower(), ('counter','gauge','derivative','flag','text'))
def metricunits(unitname):
return directives.choice(unitname.lower(), ('ratio','percent','kbits','mbits','bytes','kbytes','mbytes','nanoseconds','microseconds','milliseconds','seconds'))
class TSStat(std.Target):
"""
Description of a traffic server statistic.
Argument is the JSON stat group ("global", etc.) in which the statistic is
returned, then the statistic name as used by traffic_ctl/stats_over_http,
followed by the value type of the statistic ('string', 'integer'), and
finally an example value.
Descriptive text should follow, indented.
Then the bulk description (if any) undented. This should be considered
equivalent to the Doxygen short and long description.
"""
option_spec = {
'type': metrictypes,
'unit': metricunits,
'introduced' : rst.directives.unchanged,
'deprecated' : rst.directives.unchanged,
'ungathered' : rst.directives.flag
}
required_arguments = 3
optional_arguments = 1 # example value is optional
final_argument_whitespace = True
has_content = True
def make_field(self, tag, value):
field = nodes.field()
field.append(nodes.field_name(text=tag))
body = nodes.field_body()
if (isinstance(value, basestring)):
body.append(sphinx.addnodes.compact_paragraph(text=value))
else:
body.append(value)
field.append(body)
return field
# External entry point
def run(self):
env = self.state.document.settings.env
stat_example = None
stat_group, stat_name, stat_type = self.arguments[0:3]
if (len(self.arguments) > 3):
stat_example = self.arguments[3]
# First, make a generic desc() node to be the parent.
node = sphinx.addnodes.desc()
node.document = self.state.document
node['objtype'] = 'stat'
# Next, make a signature node. This creates a permalink and a
# highlighted background when the link is selected.
title = sphinx.addnodes.desc_signature(stat_name, '')
title['ids'].append(nodes.make_id('stat-'+stat_name))
title['names'].append(stat_name)
title['first'] = False
title['objtype'] = 'stat'
self.add_name(title)
title.set_class('ts-stat-title')
# Finally, add a desc_name() node to display the name of the
# configuration variable.
title += sphinx.addnodes.desc_name(stat_name, stat_name)
node.append(title)
# This has to be a distinct node before the title. if nested then
# the browser will scroll forward to just past the title.
anchor = nodes.target('', '', names=[stat_name])
# Second (optional) arg is 'msgNode' - no idea what I should pass for that
# or if it even matters, although I now think it should not be used.
self.state.document.note_explicit_target(title)
env.domaindata['ts']['stat'][stat_name] = env.docname
fl = nodes.field_list()
fl.append(self.make_field('Collection', stat_group))
if ('type' in self.options):
fl.append(self.make_field('Type', self.options['type']))
if ('unit' in self.options):
fl.append(self.make_field('Units', self.options['unit']))
fl.append(self.make_field('Datatype', stat_type))
if ('introduced' in self.options and len(self.options['introduced']) > 0):
fl.append(self.make_field('Introduced', self.options['introduced']))
if ('deprecated' in self.options):
if (len(self.options['deprecated']) > 0):
fl.append(self.make_field('Deprecated', self.options['deprecated']))
else:
fl.append(self.make_field('Deprecated', 'Yes'))
if ('ungathered' in self.options):
fl.append(self.make_field('Gathered', 'No'))
if (stat_example):
fl.append(self.make_field('Example', stat_example))
# Get any contained content
nn = nodes.compound()
self.state.nested_parse(self.content, self.content_offset, nn)
# Create an index node so that Sphinx adds this statistic to the
# index. nodes.make_id() specifies the link anchor name that is
# implicitly generated by the anchor node above.
indexnode = sphinx.addnodes.index(entries=[])
indexnode['entries'].append(
('single', _('%s') % stat_name, nodes.make_id(stat_name), '')
)
return [ indexnode, node, fl, nn ]
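# Matching illustrative reStructuredText usage for the statistic directive
# above (group, name and example value are made up):
#   .. ts:stat:: global proxy.process.example.count integer 0
#      :type: counter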
class TSStatRef(XRefRole):
def process_link(self, env, ref_node, explicit_title_p, title, target):
return title, target
class TrafficServerDomain(Domain):
"""
Apache Traffic Server Documentation.
"""
name = 'ts'
label = 'Traffic Server'
data_version = 2
object_types = {
'cv': ObjType(l_('configuration variable'), 'cv'),
'stat': ObjType(l_('statistic'), 'stat')
}
directives = {
'cv' : TSConfVar,
'stat' : TSStat
}
roles = {
'cv' : TSConfVarRef(),
'stat' : TSStatRef()
}
initial_data = {
'cv' : {}, # full name -> docname
'stat' : {}
}
dangling_warnings = {
'cv' : "No definition found for configuration variable '%(target)s'",
'stat' : "No definition found for statistic '%(target)s'"
}
def clear_doc(self, docname):
cv_list = self.data['cv']
for var, doc in cv_list.items():
if doc == docname:
del cv_list[var]
stat_list = self.data['stat']
for var, doc in stat_list.items():
if doc == docname:
del stat_list[var]
def find_doc(self, key, obj_type):
zret = None
if obj_type == 'cv' :
obj_list = self.data['cv']
elif obj_type == 'stat' :
obj_list = self.data['stat']
else:
obj_list = None
if obj_list and key in obj_list:
zret = obj_list[key]
return zret
def resolve_xref(self, env, src_doc, builder, obj_type, target, node, cont_node):
dst_doc = self.find_doc(target, obj_type)
if (dst_doc):
return sphinx.util.nodes.make_refnode(builder, src_doc, dst_doc, nodes.make_id(target), cont_node, 'records.config')
def get_objects(self):
for var, doc in self.data['cv'].iteritems():
yield var, var, 'cv', doc, var, 1
for var, doc in self.data['stat'].iteritems():
yield var, var, 'stat', doc, var, 1
# These types are ignored as missing references for the C++ domain.
# We really need to do better with this. Editing this file for each of
# these is already getting silly.
EXTERNAL_TYPES = set((
'int', 'uint',
'uint8_t', 'uint16_t', 'uint24_t', 'uint32_t', 'uint64_t',
'int8_t', 'int16_t', 'int24_t', 'int32_t', 'int64_t',
'unsigned', 'unsigned int',
'off_t', 'size_t', 'time_t',
'Event', 'INK_MD5', 'DLL<EvacuationBlock>',
'sockaddr'
))
# Clean up specific references that we know will never be defined but are implicitly used by
# other domain directives. Hand convert them to literals.
def xref_cleanup(app, env, node, contnode):
rdomain = node['refdomain']
rtype = node['reftype']
rtarget = node['reftarget']
if ('cpp' == rdomain) or ('c' == rdomain):
if 'type' == rtype:
# one of the predefined type, or a pointer or reference to it.
if (rtarget in EXTERNAL_TYPES) or (('*' == rtarget[-1] or '&' == rtarget[-1]) and rtarget[:-1] in EXTERNAL_TYPES):
node = nodes.literal()
node += contnode
return node
return
def setup(app):
app.add_crossref_type('configfile', 'file',
objname='Configuration file',
indextemplate='pair: %s; Configuration files')
rst.roles.register_generic_role('arg', nodes.emphasis)
rst.roles.register_generic_role('const', nodes.literal)
app.add_domain(TrafficServerDomain)
# Types that we want the C domain to consider built in
for word in EXTERNAL_TYPES:
sphinx.domains.c.CObject.stopwords.add(word)
app.connect('missing-reference', xref_cleanup)
| 36.71164
| 163
| 0.629747
|
b18d7ebac40838f08ad33a67ef2937277f23074f
| 5,908
|
py
|
Python
|
pyspike/spikes.py
|
ElsevierSoftwareX/SOFTX-D-16-00032
|
50b3d0bd1ceea236d8b4e95578cebf7c78a5035e
|
[
"BSD-2-Clause"
] | 5
|
2018-09-10T06:14:31.000Z
|
2022-01-26T20:19:42.000Z
|
pyspike/spikes.py
|
ElsevierSoftwareX/SOFTX-D-16-00032
|
50b3d0bd1ceea236d8b4e95578cebf7c78a5035e
|
[
"BSD-2-Clause"
] | null | null | null |
pyspike/spikes.py
|
ElsevierSoftwareX/SOFTX-D-16-00032
|
50b3d0bd1ceea236d8b4e95578cebf7c78a5035e
|
[
"BSD-2-Clause"
] | 5
|
2019-11-08T00:52:34.000Z
|
2021-05-20T20:34:10.000Z
|
# Module containing several function to load and transform spike trains
# Copyright 2014, Mario Mulansky <mario.mulansky@gmx.net>
# Distributed under the BSD License
import numpy as np
from pyspike import SpikeTrain
############################################################
# spike_train_from_string
############################################################
def spike_train_from_string(s, edges, sep=' ', is_sorted=False):
""" Converts a string of times into a :class:`.SpikeTrain`.
:param s: the string with (ordered) spike times.
:param edges: interval defining the edges of the spike train.
Given as a pair of floats (T0, T1) or a single float T1,
where T0=0 is assumed.
:param sep: The separator between the time numbers, default=' '.
:param is_sorted: if True, the spike times are not sorted after loading,
if False, spike times are sorted with `np.sort`
:returns: :class:`.SpikeTrain`
"""
return SpikeTrain(np.fromstring(s, sep=sep), edges, is_sorted)
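# Minimal usage sketch (times and edges are illustrative):
#   st = spike_train_from_string("0.1 0.4 0.9", edges=(0.0, 1.0))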
############################################################
# load_spike_trains_txt
############################################################
def load_spike_trains_from_txt(file_name, edges,
separator=' ', comment='#', is_sorted=False,
ignore_empty_lines=True):
""" Loads a number of spike trains from a text file. Each line of the text
file should contain one spike train as a sequence of spike times separated
by `separator`. Empty lines as well as lines starting with `comment` are
neglected. The `edges` represents the start and the end of the
spike trains.
:param file_name: The name of the text file.
:param edges: A pair (T_start, T_end) of values representing the
start and end time of the spike train measurement
or a single value representing the end time; the
T_start is then assumed to be 0.
:param separator: The character used to separate the values in the text file
:param comment: Lines starting with this character are ignored.
:param is_sorted: if True, the spike times are not sorted after loading; if False, spike times are sorted with `np.sort`, default=False
:returns: list of :class:`.SpikeTrain`
"""
spike_trains = []
spike_file = open(file_name, 'r')
for line in spike_file:
if len(line) > 1 and not line.startswith(comment):
# use only the lines with actual data and not commented
spike_train = spike_train_from_string(line, edges,
separator, is_sorted)
spike_trains.append(spike_train)
return spike_trains
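# Usage sketch, assuming a hypothetical file "spikes.txt" holding one train per
# line, e.g. "0.1 0.4 0.9" on the first line and "0.2 0.3 0.8" on the second:
#   trains = load_spike_trains_from_txt("spikes.txt", edges=(0.0, 1.0))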
############################################################
# merge_spike_trains
############################################################
def merge_spike_trains(spike_trains):
""" Merges a number of spike trains into a single spike train.
:param spike_trains: list of :class:`.SpikeTrain`
:returns: spike train with the merged spike times
"""
# get the lengths of the spike trains
lens = np.array([len(st.spikes) for st in spike_trains])
merged_spikes = np.empty(np.sum(lens))
index = 0 # the index for merged_spikes
indices = np.zeros_like(lens) # indices of the spike trains
index_list = np.arange(len(indices)) # indices of indices of spike trains
# that have not yet reached the end
# list of the possible events in the spike trains
vals = [spike_trains[i].spikes[indices[i]] for i in index_list]
while len(index_list) > 0:
i = np.argmin(vals) # the next spike is the minimum
merged_spikes[index] = vals[i] # put it to the merged spike train
i = index_list[i]
index += 1 # next index of merged spike train
indices[i] += 1 # next index for the chosen spike train
if indices[i] >= lens[i]: # remove spike train index if ended
index_list = index_list[index_list != i]
vals = [spike_trains[n].spikes[indices[n]] for n in index_list]
return SpikeTrain(merged_spikes, [spike_trains[0].t_start,
spike_trains[0].t_end])
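# Usage sketch (st1 and st2 are assumed SpikeTrain objects over the same edges);
# note that the merged train inherits t_start/t_end from the first input:
#   merged = merge_spike_trains([st1, st2])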
############################################################
# generate_poisson_spikes
############################################################
def generate_poisson_spikes(rate, interval):
""" Generates a Poisson spike train with the given rate in the given time
interval
:param rate: The rate of the spike trains
:param interval: A pair (T_start, T_end) of values representing the
start and end time of the spike train measurement or
a single value representing the end time, the T_start
is then assumed to be 0. Auxiliary spikes will be added
to the spike train at the beginning and end of this
interval, if they are not yet present.
:type interval: pair of doubles or double
:returns: Poisson spike train as a :class:`.SpikeTrain`
"""
try:
T_start = interval[0]
T_end = interval[1]
except (TypeError, IndexError):  # interval given as a single end-time value
T_start = 0
T_end = interval
# roughly how many spikes are required to fill the interval
N = max(1, int(1.2 * rate * (T_end-T_start)))
N_append = max(1, int(0.1 * rate * (T_end-T_start)))
intervals = np.random.exponential(1.0/rate, N)
# make sure we have enough spikes
while T_start + sum(intervals) < T_end:
# print T_start + sum(intervals)
intervals = np.append(intervals,
np.random.exponential(1.0/rate, N_append))
spikes = T_start + np.cumsum(intervals)
spikes = spikes[spikes < T_end]
return SpikeTrain(spikes, interval)
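# Usage sketch (rate and interval are illustrative): a 2 Hz Poisson train on [0, 10]:
#   st = generate_poisson_spikes(2.0, (0.0, 10.0))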
| 46.888889
| 79
| 0.580399
|
2aa88002f3ed01517b2dcd3f0936fd9078689009
| 1,436
|
py
|
Python
|
figthesis/figchangebs.py
|
Gattocrucco/sipmfilter
|
74215d6c53b998808fc6c677b46030234d996bdf
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
figthesis/figchangebs.py
|
Gattocrucco/sipmfilter
|
74215d6c53b998808fc6c677b46030234d996bdf
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
figthesis/figchangebs.py
|
Gattocrucco/sipmfilter
|
74215d6c53b998808fc6c677b46030234d996bdf
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
import os
import numpy as np
from matplotlib import pyplot as plt
import figlatex
import fingersnr
cacheprefix = 'figthesis/figchangebs'
baselines = [8000, 1000, 200]
cache_snrseries = 'figthesis/figsnrplot.npz'
###########################
cache = lambda bs: f'{cacheprefix}-{bs}.npz'
if not all(os.path.exists(cache(bs)) for bs in baselines):
if not os.path.exists(cache_snrseries):
raise FileNotFoundError(f'File {cache_snrseries} missing, run figsnrplot.py')
print(f'read {cache_snrseries}...')
with np.load(cache_snrseries) as arch:
tau, delta_ma, delta_exp, delta_mf, waveform, snr = tuple(arch.values())
hint_delta_ma = delta_ma[np.arange(len(tau)), np.argmax(snr[0], axis=-1)]
fs = fingersnr.FingerSnr()
for bs in baselines:
if os.path.exists(cache(bs)):
continue
out = fs.snrmax(plot=False, hint_delta_ma=hint_delta_ma, bslen=bs)
print(f'write {cache(bs)}...')
np.savez(cache(bs), *out)
snrmax_outputs = []
for bs in baselines:
print(f'read {cache(bs)}...')
with np.load(cache(bs)) as arch:
snrmax_outputs.append(tuple(arch.values()))
fig = plt.figure(num='figchangebs', clear=True, figsize=[9, 5])
axs = fingersnr.FingerSnr.snrmaxplot_multiple(fig, snrmax_outputs)
for bs, ax in zip(baselines, axs[0]):
ax.set_title(f'{bs} baseline samples')
fig.tight_layout()
fig.show()
figlatex.save(fig)
| 27.09434
| 85
| 0.662953
|
d8a0f3ec1d33045ed352ac7af3ced96f00174913
| 15,433
|
py
|
Python
|
flexmatch.py
|
houwenxin/TorchSSL
|
4086504b4c8a4cb887bf480faa97ad607fef3ed1
|
[
"MIT"
] | null | null | null |
flexmatch.py
|
houwenxin/TorchSSL
|
4086504b4c8a4cb887bf480faa97ad607fef3ed1
|
[
"MIT"
] | null | null | null |
flexmatch.py
|
houwenxin/TorchSSL
|
4086504b4c8a4cb887bf480faa97ad607fef3ed1
|
[
"MIT"
] | null | null | null |
# import needed library
import os
import logging
import random
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
from utils import net_builder, get_logger, count_parameters, over_write_args_from_file
from train_utils import TBLog, get_optimizer, get_cosine_schedule_with_warmup
from models.flexmatch.flexmatch import FlexMatch
from datasets.ssl_dataset import SSL_Dataset, ImageNetLoader
from datasets.data_utils import get_data_loader
def main(args):
'''
For (Distributed)DataParallelism,
    main(args) spawns one process (main_worker) per GPU.
'''
save_path = os.path.join(args.save_dir, args.save_name)
if os.path.exists(save_path) and not args.overwrite:
raise Exception('already existing model: {}'.format(save_path))
if args.resume:
if args.load_path is None:
raise Exception('Resume of training requires --load_path in the args')
if os.path.abspath(save_path) == os.path.abspath(args.load_path) and not args.overwrite:
            raise Exception('Saving & loading paths are the same. \
                            If you want to overwrite, pass --overwrite as an argument.')
if args.seed is not None:
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
# distributed: true if manually selected or if world_size > 1
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count() # number of gpus of each node
if args.multiprocessing_distributed:
# now, args.world_size means num of total processes in all nodes
args.world_size = ngpus_per_node * args.world_size
# args=(,) means the arguments of main_worker
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main_worker(args.gpu, ngpus_per_node, args)
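# Hedged illustration (added; the node/GPU counts below are assumptions, not
# taken from the code above): with 2 nodes of 4 GPUs each, main() rescales
# world_size to 2 * 4 = 8 processes, and the process spawned for gpu=2 on node
# rank 1 later receives global rank 1 * 4 + 2 = 6 in main_worker().
def _example_global_rank(node_rank=1, ngpus_per_node=4, gpu=2, num_nodes=2):
    world_size = num_nodes * ngpus_per_node
    global_rank = node_rank * ngpus_per_node + gpu
    return world_size, global_rank  # -> (8, 6)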
def main_worker(gpu, ngpus_per_node, args):
'''
    main_worker runs on each GPU.
'''
global best_acc1
args.gpu = gpu
    # random seed has to be set for the synchronization of labeled data sampling in each process.
assert args.seed is not None
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
cudnn.deterministic = True
# SET UP FOR DISTRIBUTED TRAINING
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
args.rank = args.rank * ngpus_per_node + gpu # compute global rank
# set distributed group:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# SET save_path and logger
save_path = os.path.join(args.save_dir, args.save_name)
logger_level = "WARNING"
tb_log = None
if args.rank % ngpus_per_node == 0:
tb_log = TBLog(save_path, 'tensorboard', use_azure=args.use_azure)
logger_level = "INFO"
logger = get_logger(args.save_name, save_path, logger_level)
logger.warning(f"USE GPU: {args.gpu} for training")
# SET flexmatch: class flexmatch in models.flexmatch
args.bn_momentum = 1.0 - 0.999
if 'imagenet' in args.dataset.lower():
_net_builder = net_builder('ResNet50', False, None, is_remix=False)
else:
_net_builder = net_builder(args.net,
args.net_from_name,
{'first_stride': 2 if 'stl' in args.dataset else 1,
'depth': args.depth,
'widen_factor': args.widen_factor,
'leaky_slope': args.leaky_slope,
'bn_momentum': args.bn_momentum,
'dropRate': args.dropout,
'use_embed': False,
'is_remix': False},
)
model = FlexMatch(_net_builder,
args.num_classes,
args.ema_m,
args.T,
args.p_cutoff,
args.ulb_loss_ratio,
args.hard_label,
num_eval_iter=args.num_eval_iter,
tb_log=tb_log,
logger=logger)
logger.info(f'Number of Trainable Params: {count_parameters(model.model)}')
# SET Optimizer & LR Scheduler
## construct SGD and cosine lr scheduler
optimizer = get_optimizer(model.model, args.optim, args.lr, args.momentum, args.weight_decay)
scheduler = get_cosine_schedule_with_warmup(optimizer,
args.num_train_iter,
num_warmup_steps=args.num_train_iter * 0)
## set SGD and cosine lr on flexmatch
model.set_optimizer(optimizer, scheduler)
# SET Devices for (Distributed) DataParallel
if not torch.cuda.is_available():
raise Exception('ONLY GPU TRAINING IS SUPPORTED')
elif args.distributed:
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
'''
batch_size: batch_size per node -> batch_size per gpu
workers: workers per node -> workers per gpu
'''
args.batch_size = int(args.batch_size / ngpus_per_node)
model.model.cuda(args.gpu)
model.model = nn.SyncBatchNorm.convert_sync_batchnorm(model.model)
model.model = torch.nn.parallel.DistributedDataParallel(model.model,
device_ids=[args.gpu],
broadcast_buffers=False,
find_unused_parameters=True)
else:
# if arg.gpu is None, DDP will divide and allocate batch_size
# to all available GPUs if device_ids are not set.
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.model = model.model.cuda(args.gpu)
else:
model.model = torch.nn.DataParallel(model.model).cuda()
logger.info(f"model_arch: {model}")
logger.info(f"Arguments: {args}")
cudnn.benchmark = True
# Construct Dataset & DataLoader
if args.dataset.lower() != "imagenet":
train_dset = SSL_Dataset(args, alg='flexmatch', name=args.dataset, train=True,
num_classes=args.num_classes, data_dir=args.data_dir)
lb_dset, ulb_dset = train_dset.get_ssl_dset(args.num_labels)
_eval_dset = SSL_Dataset(args, alg='flexmatch', name=args.dataset, train=False,
num_classes=args.num_classes, data_dir=args.data_dir)
eval_dset = _eval_dset.get_dset()
else:
image_loader = ImageNetLoader(root_path=args.data_dir, num_labels=args.num_labels,
num_class=args.num_classes)
lb_dset = image_loader.get_lb_train_data()
ulb_dset = image_loader.get_ulb_train_data()
eval_dset = image_loader.get_lb_test_data()
    if args.use_azure and args.rank % ngpus_per_node == 0:
        try:
            from azure_utils import save_to_azure
            os.rename('sampled_label_idx.json', 'flexmatch_sampled_label_idx.json')
            save_to_azure('flexmatch_sampled_label_idx.json', os.path.join(args.save_name, 'flexmatch_sampled_label_idx.json'))
        except Exception:
            print("Failed to save sampled_label_idx.json to Azure")
loader_dict = {}
dset_dict = {'train_lb': lb_dset, 'train_ulb': ulb_dset, 'eval': eval_dset}
loader_dict['train_lb'] = get_data_loader(dset_dict['train_lb'],
args.batch_size,
data_sampler=args.train_sampler,
num_iters=args.num_train_iter,
num_workers=args.num_workers,
distributed=args.distributed)
loader_dict['train_ulb'] = get_data_loader(dset_dict['train_ulb'],
args.batch_size * args.uratio,
data_sampler=args.train_sampler,
num_iters=args.num_train_iter,
num_workers=4 * args.num_workers,
distributed=args.distributed)
loader_dict['eval'] = get_data_loader(dset_dict['eval'],
args.eval_batch_size,
num_workers=args.num_workers,
drop_last=False)
## set DataLoader and ulb_dset on FlexMatch
model.set_data_loader(loader_dict)
model.set_dset(ulb_dset)
# If args.resume, load checkpoints from args.load_path
if args.resume:
model.load_model(args.load_path)
# START TRAINING of flexmatch
trainer = model.train
for epoch in range(args.epoch):
trainer(args, logger=logger)
if not args.multiprocessing_distributed or \
(args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
model.save_model('latest_model.pth', save_path)
logging.warning(f"GPU {args.rank} training is FINISHED")
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
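# Usage sketch (added for illustration): str2bool is meant to be passed as the
# `type=` callable of argparse options, so values such as 'yes'/'no'/'1'/'0'
# parse to real booleans. The option name below is hypothetical.
def _example_str2bool():
    import argparse
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument('--demo_flag', type=str2bool, default=False)
    return demo_parser.parse_args(['--demo_flag', 'yes']).demo_flag  # -> True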
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='')
'''
Saving & loading of the model.
'''
parser.add_argument('--save_dir', type=str, default='./saved_models')
parser.add_argument('-sn', '--save_name', type=str, default='flexmatch')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--load_path', type=str, default=None)
parser.add_argument('-o', '--overwrite', action='store_true')
'''
Training Configuration of flexmatch
'''
parser.add_argument('--epoch', type=int, default=1)
parser.add_argument('--num_train_iter', type=int, default=2 ** 20,
help='total number of training iterations')
parser.add_argument('--num_eval_iter', type=int, default=5000,
help='evaluation frequency')
parser.add_argument('-nl', '--num_labels', type=int, default=40)
parser.add_argument('-bsz', '--batch_size', type=int, default=64)
parser.add_argument('--uratio', type=int, default=7,
                        help='the ratio of unlabeled data to labeled data in each mini-batch')
parser.add_argument('--eval_batch_size', type=int, default=1024,
help='batch size of evaluation data loader (it does not affect the accuracy)')
parser.add_argument('--hard_label', type=str2bool, default=True)
parser.add_argument('--T', type=float, default=0.5)
parser.add_argument('--p_cutoff', type=float, default=0.95)
parser.add_argument('--ema_m', type=float, default=0.999, help='ema momentum for eval_model')
parser.add_argument('--ulb_loss_ratio', type=float, default=1.0)
parser.add_argument('--use_DA', type=str2bool, default=False)
parser.add_argument('-w', '--thresh_warmup', type=str2bool, default=True)
'''
Optimizer configurations
'''
parser.add_argument('--optim', type=str, default='SGD')
parser.add_argument('--lr', type=float, default=3e-2)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--amp', type=str2bool, default=False, help='use mixed precision training or not')
parser.add_argument('--clip', type=float, default=0)
'''
Backbone Net Configurations
'''
parser.add_argument('--net', type=str, default='WideResNet')
parser.add_argument('--net_from_name', type=str2bool, default=False)
parser.add_argument('--depth', type=int, default=28)
parser.add_argument('--widen_factor', type=int, default=2)
parser.add_argument('--leaky_slope', type=float, default=0.1)
parser.add_argument('--dropout', type=float, default=0.0)
'''
Data Configurations
'''
parser.add_argument('--data_dir', type=str, default='./data')
parser.add_argument('-ds', '--dataset', type=str, default='cifar10')
parser.add_argument('--train_sampler', type=str, default='RandomSampler')
parser.add_argument('-nc', '--num_classes', type=int, default=10)
parser.add_argument('--num_workers', type=int, default=1)
'''
    multi-GPU & Distributed Training
'''
## args for distributed training (from https://github.com/pytorch/examples/blob/master/imagenet/main.py)
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='**node rank** for distributed training')
parser.add_argument('-du', '--dist-url', default='tcp://127.0.0.1:10601', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=1, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', type=str2bool, default=True,
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
# config file
parser.add_argument('--c', type=str, default='')
'''
Azure configuration
'''
parser.add_argument('--use_azure', type=str2bool, default=False,
help='use azure')
args = parser.parse_args()
over_write_args_from_file(args, args.c)
main(args)
| 43.351124
| 127
| 0.603642
|
b2579c726883ee7229087849583b5b80bff7f4f4
| 143
|
py
|
Python
|
tests/settings/images_store_format_all_with_thumbs.py
|
teolemon/django-dynamic-scraper
|
2a46df8828fa8dcf4f74315abe99cc37b214b2e8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/settings/images_store_format_all_with_thumbs.py
|
teolemon/django-dynamic-scraper
|
2a46df8828fa8dcf4f74315abe99cc37b214b2e8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/settings/images_store_format_all_with_thumbs.py
|
teolemon/django-dynamic-scraper
|
2a46df8828fa8dcf4f74315abe99cc37b214b2e8
|
[
"BSD-3-Clause"
] | null | null | null |
from settings.base_settings import *
IMAGES_THUMBS = {
'medium': (50, 50),
'small': (25, 25),
}
DSCRAPER_IMAGES_STORE_FORMAT = 'ALL'
| 15.888889
| 36
| 0.657343
|
18874981f9a1fa9bc9a21ad0ed50d36aa9bf2273
| 1,114
|
py
|
Python
|
TIL/bs_move.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | 1
|
2020-10-23T14:29:24.000Z
|
2020-10-23T14:29:24.000Z
|
TIL/bs_move.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | null | null | null |
TIL/bs_move.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import cv2
cap = cv2.VideoCapture('PETS2000.avi')
if not cap.isOpened():
print('video not opened!')
sys.exit()
ret, back = cap.read()
if not ret :
print('back ground img error')
sys.exit()
back = cv2.cvtColor(back, cv2.COLOR_BGR2GRAY)
back = cv2.GaussianBlur(back, (0,0), 1.0)
fback = back.astype(np.float32)
while True:
ret , frame = cap.read()
if not ret:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (0,0), 1.0)
cv2.accumulateWeighted(gray, fback, 0.01)
back = fback.astype(np.uint8)
diff = cv2.absdiff(gray, back)
_, diff = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)
cnt, _, stats, _ = cv2.connectedComponentsWithStats(diff)
for i in range(1, cnt):
x, y, w, h, s = stats[i]
if s < 100:
continue
cv2.rectangle(frame, (x, y, w, h), (0, 0 , 255), 2)
cv2.imshow('frame', frame)
cv2.imshow('diff', diff )
cv2.imshow('back', back)
if cv2.waitKey(30) ==27:
break
cap.release()
cv2.destroyAllWindows()
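# Optional variant (added sketch, not part of the original script): OpenCV's
# built-in MOG2 model can replace the manual accumulateWeighted/absdiff
# background estimate above; history=500 and detectShadows=False are assumed,
# illustrative settings.
def _example_mog2(video_path='PETS2000.avi'):
    subtractor = cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=False)
    demo_cap = cv2.VideoCapture(video_path)
    ret_demo, frame_demo = demo_cap.read()
    mask = subtractor.apply(frame_demo) if ret_demo else None
    demo_cap.release()
    return mask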
| 19.892857
| 61
| 0.609515
|
3e1bac8f67cfa6f88c8920b067b013028a6ea89b
| 1,566
|
py
|
Python
|
polling_stations/apps/councils/migrations/0001_initial.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | 29
|
2015-03-10T08:41:34.000Z
|
2022-01-12T08:51:38.000Z
|
polling_stations/apps/councils/migrations/0001_initial.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | 4,112
|
2015-04-01T21:27:38.000Z
|
2022-03-31T19:22:11.000Z
|
polling_stations/apps/councils/migrations/0001_initial.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | 31
|
2015-03-18T14:52:50.000Z
|
2022-02-24T10:31:07.000Z
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Council",
fields=[
(
"council_id",
models.CharField(max_length=100, serialize=False, primary_key=True),
),
("council_type", models.CharField(max_length=10, blank=True)),
("mapit_id", models.CharField(max_length=100, blank=True)),
("name", models.CharField(max_length=255, blank=True)),
("email", models.EmailField(max_length=75, blank=True)),
("phone", models.CharField(max_length=100, blank=True)),
("website", models.URLField(blank=True)),
("postcode", models.CharField(max_length=100, null=True, blank=True)),
("address", models.TextField(null=True, blank=True)),
(
"location",
django.contrib.gis.db.models.fields.PointField(
srid=4326, null=True, blank=True
),
),
(
"area",
django.contrib.gis.db.models.fields.MultiPolygonField(
srid=4326, null=True, geography=True, blank=True
),
),
],
options={},
bases=(models.Model,),
)
]
| 34.8
| 88
| 0.488506
|
e536832762d5fa67c8474926fbe2fa84db8a23d0
| 6,037
|
py
|
Python
|
thenewboston_node/business_logic/tests/test_models/test_signed_change_request/test_node_declaration_signed_change_request.py
|
andbortnik/thenewboston-node
|
bd63c7def5f224286dba70f9560252a7da8ea712
|
[
"MIT"
] | null | null | null |
thenewboston_node/business_logic/tests/test_models/test_signed_change_request/test_node_declaration_signed_change_request.py
|
andbortnik/thenewboston-node
|
bd63c7def5f224286dba70f9560252a7da8ea712
|
[
"MIT"
] | null | null | null |
thenewboston_node/business_logic/tests/test_models/test_signed_change_request/test_node_declaration_signed_change_request.py
|
andbortnik/thenewboston-node
|
bd63c7def5f224286dba70f9560252a7da8ea712
|
[
"MIT"
] | null | null | null |
import pytest
from thenewboston_node.business_logic.exceptions import ValidationError
from thenewboston_node.business_logic.models import NodeDeclarationSignedChangeRequest
from thenewboston_node.core.utils.types import hexstr
def test_can_create_node_declaration_signed_change_request(user_account_key_pair):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=['127.0.0.1'],
fee_amount=3,
fee_account=hexstr('be10aa7e'),
signing_key=user_account_key_pair.private
)
assert request
assert request.signer
assert request.signature
assert request.message
assert request.message.node.network_addresses == ['127.0.0.1']
assert request.message.node.fee_amount == 3
assert request.message.node.fee_account == 'be10aa7e'
def test_node_declaration_signed_change_request_validate_empty_network_address(
user_account_key_pair, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[], fee_amount=3, fee_account=hexstr('be10aa7e'), signing_key=user_account_key_pair.private
)
request.validate(forced_memory_blockchain, block_number=0)
@pytest.mark.parametrize(
'network_address', [
'http://127.0.0.1',
'http://127.0.0.1:8080',
'http://[2001:db8::123.123.123.123]:80',
'http://xn--d1acufc.xn--p1ai',
'http://example.com',
'https://my.domain.com',
'https://my.domain.com/path/to/resource#fragment',
'http://localhost:8555/',
]
)
def test_node_declaration_signed_change_request_valid_network_addresses(
user_account_key_pair, network_address, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[network_address],
fee_amount=3,
fee_account=hexstr('be10aa7e'),
signing_key=user_account_key_pair.private
)
request.validate(forced_memory_blockchain, block_number=0)
def test_node_declaration_signed_change_request_validate_empty_hostname(
user_account_key_pair, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=['http://'],
fee_amount=3,
fee_account=hexstr('be10aa7e'),
signing_key=user_account_key_pair.private
)
with pytest.raises(ValidationError, match='Node network_addresses hostname must be not empty'):
request.validate(forced_memory_blockchain, block_number=0)
def test_node_declaration_signed_change_request_validate_network_addresses_scheme(
user_account_key_pair, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=['ftp://my.domain.com/'],
fee_amount=3,
fee_account=hexstr('be10aa7e'),
signing_key=user_account_key_pair.private
)
with pytest.raises(ValidationError, match='Node network_addresses scheme must be one of http, https'):
request.validate(forced_memory_blockchain, block_number=0)
def test_node_declaration_signed_change_request_validate_empty_scheme(user_account_key_pair, forced_memory_blockchain):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=['my.domain.com/'],
fee_amount=3,
fee_account=hexstr('be10aa7e'),
signing_key=user_account_key_pair.private
)
with pytest.raises(ValidationError, match='Node network_addresses scheme must be not empty'):
request.validate(forced_memory_blockchain, block_number=0)
@pytest.mark.parametrize('network_addresses', ['', None])
def test_node_declaration_signed_change_request_validate_empty_network_addresses(
user_account_key_pair, network_addresses, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[network_addresses],
fee_amount=3,
fee_account=hexstr('be10aa7e'),
signing_key=user_account_key_pair.private
)
with pytest.raises(ValidationError, match='Node network_addresses must be not empty'):
request.validate(forced_memory_blockchain, block_number=0)
def test_node_declaration_signed_change_request_validate_negative_fee_amount(
user_account_key_pair, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[], fee_amount=-3, fee_account=hexstr('be10aa7e'), signing_key=user_account_key_pair.private
)
with pytest.raises(ValidationError, match='Node fee_amount must be greater or equal to 0'):
request.validate(forced_memory_blockchain, block_number=0)
@pytest.mark.parametrize('fee_amount', [0, 3])
def test_node_declaration_signed_change_request_validate_fee_amount(
user_account_key_pair, fee_amount, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[],
fee_amount=fee_amount,
fee_account=hexstr('be10aa7e'),
signing_key=user_account_key_pair.private
)
request.validate(forced_memory_blockchain, block_number=0)
def test_node_declaration_signed_change_request_validate_fee_account_type(
user_account_key_pair, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[], fee_amount=3, fee_account=1, signing_key=user_account_key_pair.private
)
with pytest.raises(ValidationError, match='Node fee_account must be string'):
request.validate(forced_memory_blockchain, block_number=0)
def test_node_declaration_signed_change_request_validate_fee_account_is_hexadecimal(
user_account_key_pair, forced_memory_blockchain
):
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[],
fee_amount=3,
fee_account=hexstr('non-hexadecimal'),
signing_key=user_account_key_pair.private
)
with pytest.raises(ValidationError, match='Node fee_account must be hexadecimal string'):
request.validate(forced_memory_blockchain, block_number=0)
| 39.457516
| 119
| 0.764618
|
505ea1cd9832819f8d3dd590e458cbceaf3925a4
| 10,235
|
py
|
Python
|
Pilot1/Uno_UQ/uno_inferUQ_keras2.py
|
j-woz/Benchmarks
|
d518162fdafb7cfa26071b6a30a3b456dad024f6
|
[
"MIT"
] | 2
|
2021-02-06T06:47:19.000Z
|
2021-02-24T13:45:02.000Z
|
Pilot1/Uno_UQ/uno_inferUQ_keras2.py
|
j-woz/Benchmarks
|
d518162fdafb7cfa26071b6a30a3b456dad024f6
|
[
"MIT"
] | null | null | null |
Pilot1/Uno_UQ/uno_inferUQ_keras2.py
|
j-woz/Benchmarks
|
d518162fdafb7cfa26071b6a30a3b456dad024f6
|
[
"MIT"
] | 1
|
2019-08-14T14:29:42.000Z
|
2019-08-14T14:29:42.000Z
|
#! /usr/bin/env python
from __future__ import division, print_function
import argparse
import logging
import os
import numpy as np
import pandas as pd
from itertools import cycle
from keras import backend as K
import keras
from keras.utils import get_custom_objects
import data_utils_.uno as uno
import candle
import data_utils_.uno_combined_data_loader as uno_combined_data_loader
import data_utils_.uno_combined_data_generator as uno_combined_data_generator
import model_utils_.uno_model_utils as uno_model_utils
logger = logging.getLogger(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
additional_definitions_local = [
{'name':'uq_infer_file',
'default':argparse.SUPPRESS,
'action':'store',
'help':'File to do inference'},
{'name':'uq_infer_given_drugs',
'type': candle.str2bool,
'default': False,
'help':'Use given inference file to obtain drug ids to do inference'},
{'name':'uq_infer_given_cells',
'type': candle.str2bool,
'default': False,
'help':'Use given inference file to obtain cell ids to do inference'},
{'name':'uq_infer_given_indices',
'type': candle.str2bool,
'default': False,
'help':'Use given inference file to obtain indices to do inference'},
{'name':'model_file',
'type':str,
'default':'saved.model.h5',
'help':'trained model file'},
{'name':'weights_file',
'type':str,
'default':'saved.weights.h5',
'help':'trained weights file (loading model file alone sometimes does not work in keras)'},
{'name':'n_pred',
'type':int,
'default':1,
'help':'the number of predictions to make for each sample-drug combination for uncertainty quantification'}
]
required_local = ( 'model_file', 'weights_file', 'uq_infer_file',
'agg_dose', 'batch_size')
def initialize_parameters(default_model='uno_defaultUQ_model.txt'):
# Build benchmark object
unoBmk = uno.BenchmarkUno(uno.file_path, default_model, 'keras',
prog='uno_inferUQ', desc='Read models to predict tumor response to single and paired drugs.')
unoBmk.additional_definitions += additional_definitions_local
unoBmk.required = unoBmk.required.union(required_local)
# Initialize parameters
gParameters = candle.finalize_parameters(unoBmk)
#benchmark.logger.info('Params: {}'.format(gParameters))
return gParameters
def from_file(args, model):
df_data = pd.read_csv(args.uq_infer_file, sep='\t')
logger.info('data shape: {}'.format(df_data.shape))
logger.info('Size of data to infer: {}'.format(df_data.shape))
test_indices = range(df_data.shape[0])
target_str = args.agg_dose or 'Growth'
# Extract size of input layers to get number of features
num_features_list = []
feature_names_list = []
for layer in model.layers: # All layers in model
        layer_config = layer.get_config()  # getting layer config info (avoid shadowing built-in dict)
        name = layer_config['name']  # getting layer name
        if name.find('input') > -1:  # if layer is an input layer
            feature_names_list.append(name.split('.')[-1])
            size_ = layer_config['batch_input_shape']  # get layer size
num_features_list.append(size_[1])
feature_names_list.append('dragon7')
test_gen = uno_combined_data_generator.FromFileDataGenerator(df_data, test_indices,
target_str, feature_names_list, num_features_list,
batch_size=args.batch_size, shuffle=False)
return test_gen
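# Isolated sketch of the layer walk used in from_file() above (added for
# clarity; it assumes a Keras model whose input layers carry 'input' in their
# names, exactly as that function does).
def _example_input_sizes(model):
    sizes = {}
    for layer in model.layers:
        layer_config = layer.get_config()
        if layer_config['name'].find('input') > -1:
            sizes[layer_config['name']] = layer_config['batch_input_shape'][1]
    return sizes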
def given_drugs(args, loader):
test_gen = uno_combined_data_generator.CombinedDataGenerator(loader, partition='test', batch_size=args.batch_size)
# Include specified drugs
include_drugs = uno.read_IDs_file(args.uq_infer_file)
df_response = test_gen.data.df_response
if np.isin('Drug', df_response.columns.values):
df = df_response[['Drug']]
index = df.index[df['Drug'].isin(include_drugs)]
else:
df = df_response[['Drug1', 'Drug2']]
index = df.index[df['Drug1'].isin(include_drugs) |
df['Drug2'].isin(include_drugs)]
# Update object
test_gen.index = index
test_gen.index_cycle = cycle(index)
test_gen.size = len(index)
test_gen.steps = np.ceil(test_gen.size / args.batch_size)
return test_gen
def given_cells(args, loader):
test_gen = uno_combined_data_generator.CombinedDataGenerator(loader, partition='test', batch_size=args.batch_size)
# Include specified cells
include_cells = uno.read_IDs_file(args.uq_infer_file)
df = test_gen.data.df_response[['Sample']]
index = df.index[df['Sample'].isin(include_cells)]
# Update object
test_gen.index = index
test_gen.index_cycle = cycle(index)
test_gen.size = len(index)
test_gen.steps = np.ceil(test_gen.size / args.batch_size)
return test_gen
def given_indices(args, loader):
test_gen = uno_combined_data_generator.CombinedDataGenerator(loader, partition='test', batch_size=args.batch_size)
# Include specified indices
index = uno.read_IDs_file(args.uq_infer_file)
# Update object
test_gen.index = index
test_gen.index_cycle = cycle(index)
test_gen.size = len(index)
test_gen.steps = np.ceil(test_gen.size / args.batch_size)
return test_gen
def run(params):
args = candle.ArgumentStruct(**params)
candle.set_seed(args.rng_seed)
logfile_def = 'uno_infer_from_' + args.uq_infer_file + '.log'
logfile = args.logfile if args.logfile else logfile_def
uno.set_up_logger(logfile, logger, uno.loggerUno, args.verbose)
logger.info('Params: {}'.format(params))
ext = uno.extension_from_parameters(args)
candle.verify_path(args.save_path)
prefix = args.save_path + 'uno' + ext
# Load trained model
candle.register_permanent_dropout()
model = keras.models.load_model(args.model_file, compile=False)
model.load_weights(args.weights_file)
logger.info('Loaded model:')
model.summary(print_fn=logger.info)
# Determine output to infer
target = args.agg_dose or 'Growth'
if (args.uq_infer_given_drugs or args.uq_infer_given_cells or args.uq_infer_given_indices):
loader = uno_combined_data_loader.CombinedDataLoader(args.rng_seed)
loader.load(cache=args.cache,
ncols=args.feature_subsample,
agg_dose=args.agg_dose,
cell_features=args.cell_features,
drug_features=args.drug_features,
drug_median_response_min=args.drug_median_response_min,
drug_median_response_max=args.drug_median_response_max,
use_landmark_genes=args.use_landmark_genes,
use_filtered_genes=args.use_filtered_genes,
cell_feature_subset_path=args.cell_feature_subset_path or args.feature_subset_path,
drug_feature_subset_path=args.drug_feature_subset_path or args.feature_subset_path,
preprocess_rnaseq=args.preprocess_rnaseq,
single=args.single,
train_sources=args.train_sources,
test_sources=args.test_sources,
embed_feature_source=not args.no_feature_source,
encode_response_source=not args.no_response_source,
)
if args.uq_infer_given_drugs:
test_gen = given_drugs(args, loader)
elif args.uq_infer_given_cells:
test_gen = given_cells(args, loader)
else:
test_gen = given_indices(args, loader)
else:
test_gen = from_file(args, model)
df_test = test_gen.get_response(copy=True)
y_test = df_test[target].values
for i in range(args.n_pred):
if args.no_gen:
x_test_list, y_test = test_gen.get_slice(size=test_gen.size, single=args.single)
y_test_pred = model.predict(x_test_list, batch_size=args.batch_size)
else:
test_gen.reset()
y_test_pred = model.predict_generator(test_gen.flow(single=args.single), test_gen.steps)
y_test_pred = y_test_pred[:test_gen.size]
if args.loss == 'heteroscedastic':
y_test_pred_ = y_test_pred[:,0]
s_test_pred = y_test_pred[:,1]
y_test_pred = y_test_pred_.flatten()
df_test['Predicted_'+target+'_'+str(i+1)] = y_test_pred
df_test['Pred_S_'+target+'_'+str(i+1)] = s_test_pred
pred_fname = prefix + '.predicted_INFER_HET.tsv'
elif args.loss == 'quantile':
y_test_pred_50q = y_test_pred[:,0]
y_test_pred_10q = y_test_pred[:,1]
y_test_pred_90q = y_test_pred[:,2]
y_test_pred = y_test_pred_50q.flatten() # 50th quantile prediction
df_test['Predicted_50q_'+target+'_'+str(i+1)] = y_test_pred
df_test['Predicted_10q_'+target+'_'+str(i+1)] = y_test_pred_10q.flatten()
df_test['Predicted_90q_'+target+'_'+str(i+1)] = y_test_pred_90q.flatten()
pred_fname = prefix + '.predicted_INFER_QTL.tsv'
else:
y_test_pred = y_test_pred.flatten()
df_test['Predicted_'+target+'_'+str(i+1)] = y_test_pred
pred_fname = prefix + '.predicted_INFER.tsv'
if args.n_pred < 21:
scores = uno.evaluate_prediction(y_test, y_test_pred)
uno.log_evaluation(scores, logger)
df_pred = df_test
if args.agg_dose:
if args.single:
df_pred.sort_values(['Sample', 'Drug1', target], inplace=True)
else:
df_pred.sort_values(['Sample', 'Drug1', 'Drug2', target], inplace=True)
else:
if args.single:
df_pred.sort_values(['Sample', 'Drug1', 'Dose1', 'Growth'], inplace=True)
else:
df_pred.sort_values(['Sample', 'Drug1', 'Drug2', 'Dose1', 'Dose2', 'Growth'], inplace=True)
df_pred.to_csv(pred_fname, sep='\t', index=False, float_format='%.4g')
logger.info('Predictions stored in file: {}'.format(pred_fname))
if K.backend() == 'tensorflow':
K.clear_session()
logger.handlers = []
def main():
params = initialize_parameters()
run(params)
if __name__ == '__main__':
main()
if K.backend() == 'tensorflow':
K.clear_session()
| 33.890728
| 118
| 0.670933
|
542a7d7ee2abd164d771374b53223f56c0008c96
| 24,019
|
py
|
Python
|
Assets/Python/Plugins/Resources/Lib/calendar.py
|
OpenColonyShip/OpenColonyShip-Core
|
3dd8fd5c86ca89b1bf76d4dc4e0372f2c924d2ae
|
[
"MIT"
] | null | null | null |
Assets/Python/Plugins/Resources/Lib/calendar.py
|
OpenColonyShip/OpenColonyShip-Core
|
3dd8fd5c86ca89b1bf76d4dc4e0372f2c924d2ae
|
[
"MIT"
] | null | null | null |
Assets/Python/Plugins/Resources/Lib/calendar.py
|
OpenColonyShip/OpenColonyShip-Core
|
3dd8fd5c86ca89b1bf76d4dc4e0372f2c924d2ae
|
[
"MIT"
] | null | null | null |
"""Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
import sys
import datetime
import locale as _locale
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
"monthcalendar", "prmonth", "month", "prcal", "calendar",
"timegm", "month_name", "month_abbr", "day_name", "day_abbr"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
# Exceptions raised for bad input
class IllegalMonthError(ValueError):
def __init__(self, month):
self.month = month
def __str__(self):
return "bad month number %r; must be 1-12" % self.month
class IllegalWeekdayError(ValueError):
def __init__(self, weekday):
self.weekday = weekday
def __str__(self):
return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
# Constants for months referenced later
January = 1
February = 2
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
_months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
_months.insert(0, lambda x: "")
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._months[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 13
class _localized_day:
# January 1, 2001, was a Monday.
_days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._days[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')
# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
def isleap(year):
"""Return True for leap years, False for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def leapdays(y1, y2):
"""Return number of leap years in range [y1, y2).
Assume y1 <= y2."""
y1 -= 1
y2 -= 1
return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
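# Usage sketch (added): the range is half-open, so leapdays(2000, 2021) counts
# 2000, 2004, 2008, 2012, 2016 and 2020, i.e. 6 leap years.
def _example_leapdays():
    return leapdays(2000, 2021)  # -> 6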
def weekday(year, month, day):
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
day (1-31)."""
return datetime.date(year, month, day).weekday()
def monthrange(year, month):
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
year, month."""
if not 1 <= month <= 12:
raise IllegalMonthError(month)
day1 = weekday(year, month, 1)
ndays = mdays[month] + (month == February and isleap(year))
return day1, ndays
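# Usage sketch (added): February 2000 started on a Tuesday (weekday 1) and,
# 2000 being a leap year, had 29 days.
def _example_monthrange():
    return monthrange(2000, 2)  # -> (1, 29)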
class Calendar(object):
"""
Base calendar class. This class doesn't do any formatting. It simply
provides data to subclasses.
"""
def __init__(self, firstweekday=0):
self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
def getfirstweekday(self):
return self._firstweekday % 7
def setfirstweekday(self, firstweekday):
self._firstweekday = firstweekday
firstweekday = property(getfirstweekday, setfirstweekday)
def iterweekdays(self):
"""
Return an iterator for one week of weekday numbers starting with the
configured first one.
"""
for i in range(self.firstweekday, self.firstweekday + 7):
yield i%7
def itermonthdates(self, year, month):
"""
Return an iterator for one month. The iterator will yield datetime.date
values and will always iterate through complete weeks, so it will yield
dates outside the specified month.
"""
date = datetime.date(year, month, 1)
# Go back to the beginning of the week
days = (date.weekday() - self.firstweekday) % 7
date -= datetime.timedelta(days=days)
oneday = datetime.timedelta(days=1)
while True:
yield date
try:
date += oneday
except OverflowError:
# Adding one day could fail after datetime.MAXYEAR
break
if date.month != month and date.weekday() == self.firstweekday:
break
def itermonthdays2(self, year, month):
"""
Like itermonthdates(), but will yield (day number, weekday number)
tuples. For days outside the specified month the day number is 0.
"""
for date in self.itermonthdates(year, month):
if date.month != month:
yield (0, date.weekday())
else:
yield (date.day, date.weekday())
def itermonthdays(self, year, month):
"""
Like itermonthdates(), but will yield day numbers. For days outside
the specified month the day number is 0.
"""
for date in self.itermonthdates(year, month):
if date.month != month:
yield 0
else:
yield date.day
def monthdatescalendar(self, year, month):
"""
Return a matrix (list of lists) representing a month's calendar.
Each row represents a week; week entries are datetime.date values.
"""
dates = list(self.itermonthdates(year, month))
return [ dates[i:i+7] for i in range(0, len(dates), 7) ]
def monthdays2calendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; week entries are
(day number, weekday number) tuples. Day numbers outside this month
are zero.
"""
days = list(self.itermonthdays2(year, month))
return [ days[i:i+7] for i in range(0, len(days), 7) ]
def monthdayscalendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; days outside this month are zero.
"""
days = list(self.itermonthdays(year, month))
return [ days[i:i+7] for i in range(0, len(days), 7) ]
def yeardatescalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting. The return
value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects.
"""
months = [
self.monthdatescalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
def yeardays2calendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are
(day number, weekday number) tuples. Day numbers outside this month are
zero.
"""
months = [
self.monthdays2calendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
def yeardayscalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are day numbers.
Day numbers outside this month are zero.
"""
months = [
self.monthdayscalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
class TextCalendar(Calendar):
"""
Subclass of Calendar that outputs a calendar as a simple plain text
similar to the UNIX program cal.
"""
def prweek(self, theweek, width):
"""
Print a single week (no newline).
"""
print self.formatweek(theweek, width),
def formatday(self, day, weekday, width):
"""
Returns a formatted day.
"""
if day == 0:
s = ''
else:
s = '%2i' % day # right-align single-digit days
return s.center(width)
def formatweek(self, theweek, width):
"""
Returns a single week in a string (no newline).
"""
return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
def formatweekday(self, day, width):
"""
Returns a formatted week day name.
"""
if width >= 9:
names = day_name
else:
names = day_abbr
return names[day][:width].center(width)
def formatweekheader(self, width):
"""
Return a header for a week.
"""
return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
def formatmonthname(self, theyear, themonth, width, withyear=True):
"""
Return a formatted month name.
"""
s = month_name[themonth]
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
def prmonth(self, theyear, themonth, w=0, l=0):
"""
Print a month's calendar.
"""
print self.formatmonth(theyear, themonth, w, l),
def formatmonth(self, theyear, themonth, w=0, l=0):
"""
Return a month's calendar string (multi-line).
"""
w = max(2, w)
l = max(1, l)
s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
s = s.rstrip()
s += '\n' * l
s += self.formatweekheader(w).rstrip()
s += '\n' * l
for week in self.monthdays2calendar(theyear, themonth):
s += self.formatweek(week, w).rstrip()
s += '\n' * l
return s
def formatyear(self, theyear, w=2, l=1, c=6, m=3):
"""
Returns a year's calendar as a multi-line string.
"""
w = max(2, w)
l = max(1, l)
c = max(2, c)
colwidth = (w + 1) * 7 - 1
v = []
a = v.append
a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
a('\n'*l)
header = self.formatweekheader(w)
for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
# months in this row
months = range(m*i+1, min(m*(i+1)+1, 13))
a('\n'*l)
names = (self.formatmonthname(theyear, k, colwidth, False)
for k in months)
a(formatstring(names, colwidth, c).rstrip())
a('\n'*l)
headers = (header for k in months)
a(formatstring(headers, colwidth, c).rstrip())
a('\n'*l)
# max number of weeks for this row
height = max(len(cal) for cal in row)
for j in range(height):
weeks = []
for cal in row:
if j >= len(cal):
weeks.append('')
else:
weeks.append(self.formatweek(cal[j], w))
a(formatstring(weeks, colwidth, c).rstrip())
a('\n' * l)
return ''.join(v)
def pryear(self, theyear, w=0, l=0, c=6, m=3):
"""Print a year's calendar."""
print self.formatyear(theyear, w, l, c, m)
class HTMLCalendar(Calendar):
"""
This calendar returns complete HTML pages.
"""
# CSS classes for the day <td>s
cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
def formatday(self, day, weekday):
"""
Return a day as a table cell.
"""
if day == 0:
return '<td class="noday"> </td>' # day outside month
else:
return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
def formatweek(self, theweek):
"""
Return a complete week as a table row.
"""
s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
return '<tr>%s</tr>' % s
def formatweekday(self, day):
"""
Return a weekday name as a table header.
"""
return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
def formatweekheader(self):
"""
Return a header for a week as a table row.
"""
s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
return '<tr>%s</tr>' % s
def formatmonthname(self, theyear, themonth, withyear=True):
"""
Return a month name as a table row.
"""
if withyear:
s = '%s %s' % (month_name[themonth], theyear)
else:
s = '%s' % month_name[themonth]
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
def formatmonth(self, theyear, themonth, withyear=True):
"""
Return a formatted month as a table.
"""
v = []
a = v.append
a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
a('\n')
a(self.formatmonthname(theyear, themonth, withyear=withyear))
a('\n')
a(self.formatweekheader())
a('\n')
for week in self.monthdays2calendar(theyear, themonth):
a(self.formatweek(week))
a('\n')
a('</table>')
a('\n')
return ''.join(v)
def formatyear(self, theyear, width=3):
"""
Return a formatted year as a table of tables.
"""
v = []
a = v.append
width = max(width, 1)
a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
a('\n')
a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
for i in range(January, January+12, width):
# months in this row
months = range(i, min(i+width, 13))
a('<tr>')
for m in months:
a('<td>')
a(self.formatmonth(theyear, m, withyear=False))
a('</td>')
a('</tr>')
a('</table>')
return ''.join(v)
def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
"""
Return a formatted year as a complete HTML page.
"""
if encoding is None:
encoding = sys.getdefaultencoding()
v = []
a = v.append
a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
a('<html>\n')
a('<head>\n')
a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
if css is not None:
a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
a('<title>Calendar for %d</title>\n' % theyear)
a('</head>\n')
a('<body>\n')
a(self.formatyear(theyear, width))
a('</body>\n')
a('</html>\n')
return ''.join(v).encode(encoding, "xmlcharrefreplace")
class TimeEncoding:
def __init__(self, locale):
self.locale = locale
def __enter__(self):
self.oldlocale = _locale.getlocale(_locale.LC_TIME)
_locale.setlocale(_locale.LC_TIME, self.locale)
return _locale.getlocale(_locale.LC_TIME)[1]
def __exit__(self, *args):
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
class LocaleTextCalendar(TextCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
TextCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day, width):
with TimeEncoding(self.locale) as encoding:
if width >= 9:
names = day_name
else:
names = day_abbr
name = names[day]
if encoding is not None:
name = name.decode(encoding)
return name[:width].center(width)
def formatmonthname(self, theyear, themonth, width, withyear=True):
with TimeEncoding(self.locale) as encoding:
s = month_name[themonth]
if encoding is not None:
s = s.decode(encoding)
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
class LocaleHTMLCalendar(HTMLCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
HTMLCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day):
with TimeEncoding(self.locale) as encoding:
s = day_abbr[day]
if encoding is not None:
s = s.decode(encoding)
return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
def formatmonthname(self, theyear, themonth, withyear=True):
with TimeEncoding(self.locale) as encoding:
s = month_name[themonth]
if encoding is not None:
s = s.decode(encoding)
if withyear:
s = '%s %s' % (s, theyear)
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
# Support for old module level interface
c = TextCalendar()
firstweekday = c.getfirstweekday
def setfirstweekday(firstweekday):
try:
firstweekday.__index__
except AttributeError:
raise IllegalWeekdayError(firstweekday)
if not MONDAY <= firstweekday <= SUNDAY:
raise IllegalWeekdayError(firstweekday)
c.firstweekday = firstweekday
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1 # Amount printed by prweek()
_spacing = 6 # Number of spaces between columns
def format(cols, colwidth=_colwidth, spacing=_spacing):
"""Prints multi-column formatting for year calendars"""
print formatstring(cols, colwidth, spacing)
def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
"""Returns a string formatted from n strings, centered within n columns."""
spacing *= ' '
return spacing.join(c.center(colwidth) for c in cols)
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
def timegm(tuple):
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
year, month, day, hour, minute, second = tuple[:6]
days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
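# Usage sketch (added): timegm() maps a UTC time tuple back to a Unix
# timestamp, so midnight on 2 Jan 1970 is one full day, 86400 seconds.
def _example_timegm():
    return timegm((1970, 1, 2, 0, 0, 0))  # -> 86400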
def main(args):
import optparse
parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
parser.add_option(
"-w", "--width",
dest="width", type="int", default=2,
help="width of date column (default 2, text only)"
)
parser.add_option(
"-l", "--lines",
dest="lines", type="int", default=1,
help="number of lines for each week (default 1, text only)"
)
parser.add_option(
"-s", "--spacing",
dest="spacing", type="int", default=6,
help="spacing between months (default 6, text only)"
)
parser.add_option(
"-m", "--months",
dest="months", type="int", default=3,
help="months per row (default 3, text only)"
)
parser.add_option(
"-c", "--css",
dest="css", default="calendar.css",
help="CSS to use for page (html only)"
)
parser.add_option(
"-L", "--locale",
dest="locale", default=None,
help="locale to be used from month and weekday names"
)
parser.add_option(
"-e", "--encoding",
dest="encoding", default=None,
help="Encoding to use for output"
)
parser.add_option(
"-t", "--type",
dest="type", default="text",
choices=("text", "html"),
help="output type (text or html)"
)
(options, args) = parser.parse_args(args)
if options.locale and not options.encoding:
parser.error("if --locale is specified --encoding is required")
sys.exit(1)
locale = options.locale, options.encoding
if options.type == "html":
if options.locale:
cal = LocaleHTMLCalendar(locale=locale)
else:
cal = HTMLCalendar()
encoding = options.encoding
if encoding is None:
encoding = sys.getdefaultencoding()
optdict = dict(encoding=encoding, css=options.css)
if len(args) == 1:
print cal.formatyearpage(datetime.date.today().year, **optdict)
elif len(args) == 2:
print cal.formatyearpage(int(args[1]), **optdict)
else:
parser.error("incorrect number of arguments")
sys.exit(1)
else:
if options.locale:
cal = LocaleTextCalendar(locale=locale)
else:
cal = TextCalendar()
optdict = dict(w=options.width, l=options.lines)
if len(args) != 3:
optdict["c"] = options.spacing
optdict["m"] = options.months
if len(args) == 1:
result = cal.formatyear(datetime.date.today().year, **optdict)
elif len(args) == 2:
result = cal.formatyear(int(args[1]), **optdict)
elif len(args) == 3:
result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
else:
parser.error("incorrect number of arguments")
sys.exit(1)
if options.encoding:
result = result.encode(options.encoding)
print result
if __name__ == "__main__":
main(sys.argv)
| 33.640056
| 125
| 0.559515
|
87e0587273f07a45d209c3760bad03c64753b18c
| 2,000
|
py
|
Python
|
scripts/practiceScripts/matrixplot.py
|
czbiohub/scRFE
|
716b0f59b4b949e6842af3080276c7ea835618a9
|
[
"MIT"
] | 11
|
2020-03-24T17:10:50.000Z
|
2021-09-08T22:56:16.000Z
|
scripts/practiceScripts/matrixplot.py
|
czbiohub/scRFE
|
716b0f59b4b949e6842af3080276c7ea835618a9
|
[
"MIT"
] | null | null | null |
scripts/practiceScripts/matrixplot.py
|
czbiohub/scRFE
|
716b0f59b4b949e6842af3080276c7ea835618a9
|
[
"MIT"
] | 1
|
2020-03-26T23:42:00.000Z
|
2020-03-26T23:42:00.000Z
|
#!/usr/bin/env python
# coding: utf-8
# # Visualization: Matrix Plot
# In[1]:
# this is for comparing results from 3m and 24m data
# In[2]:
# imports
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import read_h5ad
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import RFE
from matplotlib import pyplot as plt
# In[3]:
# read in raw data
adata = read_h5ad('/Users/madelinepark/Downloads/Limb_Muscle_facs.h5ad')
# In[4]:
# read in results and sort by gini
results_24 = pd.read_csv('/Users/madelinepark/src2/maca-data-analysis/results_age_first_24m.csv')
results_sorted_24 = results_24.sort_values(by='24m_gini',ascending=False)
results_3 = pd.read_csv('/Users/madelinepark/src2/maca-data-analysis/results_age_first_3m.csv')
results_sorted_3 = results_3.sort_values(by='3m_gini',ascending=False)
# In[15]:
# take top genes and ginis, here we chose 10
results_top_24_gene = results_sorted_24['24m'][0:10]
results_top_24_gini = results_sorted_24['24m_gini'][0:10]
results_top_3_gene = results_sorted_3['3m'][0:10]
results_top_3_gini = results_sorted_3['3m_gini'][0:10]
# In[10]:
results_top_gene_list = []
results_top_gene_list.extend(results_top_24_gene)
results_top_gene_list.extend(results_top_3_gene)
# In[11]:
results_top_genes = list(set(results_top_gene_list) & set(adata.var_names.values))
# In[12]:
adatasubset = adata[adata.obs['age'].isin(['3m','24m'])]
# In[13]:
# Need to change the order of the ages
adatasubset.obs['age_num'] = adatasubset.obs['age']
adatasubset.obs['age_num'] = [an.split('m')[0] for an in adatasubset.obs['age_num']]
# In[14]:
sc.pl.matrixplot(adatasubset, results_top_genes,
groupby='age_num', dendrogram=False,log=True,cmap='Blues',save = '_top_30_droplet_test_8.pdf')
# In[ ]:
# In[ ]:
| 19.417476
| 109
| 0.751
|
f4ccbbbbbc60c3a9dd71dad45c1a9677a4293296
| 70
|
py
|
Python
|
countinggis/generators/every_other.py
|
Kodsport/nova-challenge-2018
|
e9d5e3d63a79c2191ca55f48438344d8b7719d90
|
[
"Apache-2.0"
] | 1
|
2019-09-13T13:38:16.000Z
|
2019-09-13T13:38:16.000Z
|
countinggis/generators/every_other.py
|
Kodsport/nova-challenge-2018
|
e9d5e3d63a79c2191ca55f48438344d8b7719d90
|
[
"Apache-2.0"
] | null | null | null |
countinggis/generators/every_other.py
|
Kodsport/nova-challenge-2018
|
e9d5e3d63a79c2191ca55f48438344d8b7719d90
|
[
"Apache-2.0"
] | null | null | null |
print(1000000, 500000)
s = [2*i + 2 for i in range(500000)]
print(*s)
| 17.5
| 36
| 0.642857
|
049b4c02dc2b5ab0d90e1f1138eed62962509344
| 340
|
py
|
Python
|
sympy/series/series.py
|
FabianBall/sympy
|
9d849ddfc45427fe7f6733ce4d18fa397d0f43a9
|
[
"BSD-3-Clause"
] | 15
|
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
sympy/series/series.py
|
FabianBall/sympy
|
9d849ddfc45427fe7f6733ce4d18fa397d0f43a9
|
[
"BSD-3-Clause"
] | 13
|
2020-03-24T17:53:51.000Z
|
2022-02-10T20:01:14.000Z
|
sympy/series/series.py
|
FabianBall/sympy
|
9d849ddfc45427fe7f6733ce4d18fa397d0f43a9
|
[
"BSD-3-Clause"
] | 11
|
2020-06-29T08:40:24.000Z
|
2022-02-24T17:39:16.000Z
|
from __future__ import print_function, division
from sympy.core.sympify import sympify
def series(expr, x=None, x0=0, n=6, dir="+"):
"""Series expansion of expr around point `x = x0`.
See the docstring of Expr.series() for complete details of this wrapper.
"""
expr = sympify(expr)
return expr.series(x, x0, n, dir)
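# Usage sketch (added; assumes the usual sympy top-level imports): expanding
# sin(x) around x0=0 up to order 4 drops terms from x**4 onwards.
def _example_series():
    from sympy import Symbol, sin
    x = Symbol('x')
    return series(sin(x), x, 0, 4)  # -> x - x**3/6 + O(x**4)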
| 26.153846
| 76
| 0.682353
|
006029c8a30dde2c007cc732603613bfbb1de875
| 854
|
py
|
Python
|
project_euler/problem_09/sol3.py
|
joeyzhou85/python
|
9c0cbe33076a570a3c02825b7c6d9866a760e777
|
[
"MIT"
] | 2
|
2019-07-30T18:26:58.000Z
|
2021-06-06T14:08:59.000Z
|
project_euler/problem_09/sol3.py
|
joeyzhou85/python
|
9c0cbe33076a570a3c02825b7c6d9866a760e777
|
[
"MIT"
] | null | null | null |
project_euler/problem_09/sol3.py
|
joeyzhou85/python
|
9c0cbe33076a570a3c02825b7c6d9866a760e777
|
[
"MIT"
] | 3
|
2019-04-29T02:36:37.000Z
|
2019-10-05T12:17:59.000Z
|
"""
Problem Statement:
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
from __future__ import print_function
def solution():
"""
Returns the product of a,b,c which are Pythagorean Triplet that satisfies
the following:
1. a**2 + b**2 = c**2
2. a + b + c = 1000
# The code below has been commented due to slow execution affecting Travis.
# >>> solution()
# 31875000
"""
return [
a * b * c
for a in range(1, 999)
for b in range(a, 999)
for c in range(b, 999)
if (a * a + b * b == c * c) and (a + b + c == 1000)
][0]
if __name__ == "__main__":
print(solution())
| 21.897436
| 79
| 0.567916
|
1b1bb76921f421f3fed5de32f31ef83ae7479f7c
| 1,230
|
py
|
Python
|
tie/processors.py
|
raphigaziano/TIE
|
ccf3ad938058557058fa4a2111ace539c62e1c43
|
[
"WTFPL"
] | null | null | null |
tie/processors.py
|
raphigaziano/TIE
|
ccf3ad938058557058fa4a2111ace539c62e1c43
|
[
"WTFPL"
] | null | null | null |
tie/processors.py
|
raphigaziano/TIE
|
ccf3ad938058557058fa4a2111ace539c62e1c43
|
[
"WTFPL"
] | 2
|
2016-02-18T01:51:00.000Z
|
2016-03-04T08:13:55.000Z
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Default Tag processing callbacks.
Callbacks should be callable objects (simple functions or objects with a
__call__ method) and must accept the following arguments:
match: A MatchObject instance, returned by regular expression string
matching.
**context: A dictionary of context variables, provided as keyword
arguments.
After any processing is done, they should return the value to be injected as
a unicode string.
"""
import re
import warnings
from tie import utils, helpers
from tie.exceptions import ContextWarning
def sub(match, **context):
"""
Default tag processor.
Returns the appropriate value from **context for a matched tag.
"""
tag = helpers.get_single_group(match)
if re.search(r"\[.+\]|\.", tag):
# Attribute/Indice lookup
val = utils.unicode(eval(tag, {"__builtins__": None}, context))
else:
# Straight value
val = utils.unicode(context.get(tag, "")) # TODO: Error check
if not val and tag not in context.keys():
warnings.warn(
"No context variable matched the tag %s" % tag,
ContextWarning
)
return val
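
# Example of a custom processor following the contract documented in the module
# docstring (a hypothetical helper added for illustration, not shipped with tie):
# it delegates to the default `sub` processor and upper-cases the injected value.
def upper_sub(match, **context):
    """Like `sub`, but upper-cases the substituted value."""
    return sub(match, **context).upper()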
| 30
| 77
| 0.657724
|
fe6bff4fc20404c0e96ee83537fca7360624baf8
| 1,994
|
py
|
Python
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CancelTaskRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1
|
2021-03-08T02:59:17.000Z
|
2021-03-08T02:59:17.000Z
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CancelTaskRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:51:47.000Z
|
2020-05-31T14:51:47.000Z
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CancelTaskRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CancelTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CancelTask','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_TaskId(self):
return self.get_query_params().get('TaskId')
def set_TaskId(self,TaskId):
self.add_query_param('TaskId',TaskId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
| 35.607143
| 74
| 0.769308
|
63944715e9ec6bb65b97eda301d88741c32f563e
| 26,153
|
py
|
Python
|
hummingbot/strategy/liquidity_mining/liquidity_mining.py
|
phbrgnomo/hummingbot
|
72382954a06a277248f44b321344186aef1c367c
|
[
"Apache-2.0"
] | 1
|
2021-07-01T20:56:32.000Z
|
2021-07-01T20:56:32.000Z
|
hummingbot/strategy/liquidity_mining/liquidity_mining.py
|
phbrgnomo/hummingbot
|
72382954a06a277248f44b321344186aef1c367c
|
[
"Apache-2.0"
] | null | null | null |
hummingbot/strategy/liquidity_mining/liquidity_mining.py
|
phbrgnomo/hummingbot
|
72382954a06a277248f44b321344186aef1c367c
|
[
"Apache-2.0"
] | 1
|
2021-10-12T15:40:43.000Z
|
2021-10-12T15:40:43.000Z
|
from decimal import Decimal
import logging
import asyncio
from typing import Dict, List, Set
import pandas as pd
import numpy as np
from statistics import mean
from hummingbot.core.clock import Clock
from hummingbot.logger import HummingbotLogger
from hummingbot.strategy.strategy_py_base import StrategyPyBase
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from .data_types import Proposal, PriceSize
from hummingbot.core.event.events import OrderType, TradeType
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.utils.estimate_fee import estimate_fee
from hummingbot.strategy.pure_market_making.inventory_skew_calculator import (
calculate_bid_ask_ratios_from_base_asset_ratio
)
from hummingbot.connector.parrot import get_campaign_summary
from hummingbot.core.rate_oracle.rate_oracle import RateOracle
from hummingbot.strategy.utils import order_age
NaN = float("nan")
s_decimal_zero = Decimal(0)
s_decimal_nan = Decimal("NaN")
lms_logger = None
class LiquidityMiningStrategy(StrategyPyBase):
@classmethod
def logger(cls) -> HummingbotLogger:
global lms_logger
if lms_logger is None:
lms_logger = logging.getLogger(__name__)
return lms_logger
def __init__(self,
exchange: ExchangeBase,
market_infos: Dict[str, MarketTradingPairTuple],
token: str,
order_amount: Decimal,
spread: Decimal,
inventory_skew_enabled: bool,
target_base_pct: Decimal,
order_refresh_time: float,
order_refresh_tolerance_pct: Decimal,
inventory_range_multiplier: Decimal = Decimal("1"),
volatility_interval: int = 60 * 5,
avg_volatility_period: int = 10,
volatility_to_spread_multiplier: Decimal = Decimal("1"),
max_spread: Decimal = Decimal("-1"),
max_order_age: float = 60. * 60.,
status_report_interval: float = 900,
hb_app_notification: bool = False):
super().__init__()
self._exchange = exchange
self._market_infos = market_infos
self._token = token
self._order_amount = order_amount
self._spread = spread
self._order_refresh_time = order_refresh_time
self._order_refresh_tolerance_pct = order_refresh_tolerance_pct
self._inventory_skew_enabled = inventory_skew_enabled
self._target_base_pct = target_base_pct
self._inventory_range_multiplier = inventory_range_multiplier
self._volatility_interval = volatility_interval
self._avg_volatility_period = avg_volatility_period
self._volatility_to_spread_multiplier = volatility_to_spread_multiplier
self._max_spread = max_spread
self._max_order_age = max_order_age
self._ev_loop = asyncio.get_event_loop()
self._last_timestamp = 0
self._status_report_interval = status_report_interval
self._ready_to_trade = False
self._refresh_times = {market: 0 for market in market_infos}
self._token_balances = {}
self._sell_budgets = {}
self._buy_budgets = {}
self._mid_prices = {market: [] for market in market_infos}
self._volatility = {market: s_decimal_nan for market in self._market_infos}
self._last_vol_reported = 0.
self._hb_app_notification = hb_app_notification
self.add_markets([exchange])
@property
def active_orders(self):
"""
List active orders (they have been sent to the market and have not been cancelled yet)
"""
limit_orders = self.order_tracker.active_limit_orders
return [o[1] for o in limit_orders]
@property
def sell_budgets(self):
return self._sell_budgets
@property
def buy_budgets(self):
return self._buy_budgets
def tick(self, timestamp: float):
"""
Clock tick entry point, is run every second (on normal tick setting).
:param timestamp: current tick timestamp
"""
if not self._ready_to_trade:
# Check if there are restored orders, they should be canceled before strategy starts.
self._ready_to_trade = self._exchange.ready and len(self._exchange.limit_orders) == 0
if not self._exchange.ready:
self.logger().warning(f"{self._exchange.name} is not ready. Please wait...")
return
else:
self.logger().info(f"{self._exchange.name} is ready. Trading started.")
self.create_budget_allocation()
self.update_mid_prices()
self.update_volatility()
proposals = self.create_base_proposals()
self._token_balances = self.adjusted_available_balances()
if self._inventory_skew_enabled:
self.apply_inventory_skew(proposals)
self.apply_budget_constraint(proposals)
self.cancel_active_orders(proposals)
self.execute_orders_proposal(proposals)
self._last_timestamp = timestamp
async def active_orders_df(self) -> pd.DataFrame:
"""
Return the active orders in a DataFrame.
"""
size_q_col = f"Amt({self._token})" if self.is_token_a_quote_token() else "Amt(Quote)"
columns = ["Market", "Side", "Price", "Spread", "Amount", size_q_col, "Age"]
data = []
for order in self.active_orders:
mid_price = self._market_infos[order.trading_pair].get_mid_price()
spread = 0 if mid_price == 0 else abs(order.price - mid_price) / mid_price
size_q = order.quantity * mid_price
age = order_age(order)
            # order_age() is non-positive for paper/restored orders, shown as 'n/a'; otherwise format the age
age_txt = "n/a" if age <= 0. else pd.Timestamp(age, unit='s').strftime('%H:%M:%S')
data.append([
order.trading_pair,
"buy" if order.is_buy else "sell",
float(order.price),
f"{spread:.2%}",
float(order.quantity),
float(size_q),
age_txt
])
df = pd.DataFrame(data=data, columns=columns)
df.sort_values(by=["Market", "Side"], inplace=True)
return df
def budget_status_df(self) -> pd.DataFrame:
"""
Return the trader's budget in a DataFrame
"""
data = []
columns = ["Market", f"Budget({self._token})", "Base bal", "Quote bal", "Base/Quote"]
for market, market_info in self._market_infos.items():
mid_price = market_info.get_mid_price()
base_bal = self._sell_budgets[market]
quote_bal = self._buy_budgets[market]
total_bal_in_quote = (base_bal * mid_price) + quote_bal
total_bal_in_token = total_bal_in_quote
if not self.is_token_a_quote_token():
total_bal_in_token = base_bal + (quote_bal / mid_price)
base_pct = (base_bal * mid_price) / total_bal_in_quote if total_bal_in_quote > 0 else s_decimal_zero
quote_pct = quote_bal / total_bal_in_quote if total_bal_in_quote > 0 else s_decimal_zero
data.append([
market,
float(total_bal_in_token),
float(base_bal),
float(quote_bal),
f"{base_pct:.0%} / {quote_pct:.0%}"
])
df = pd.DataFrame(data=data, columns=columns).replace(np.nan, '', regex=True)
df.sort_values(by=["Market"], inplace=True)
return df
def market_status_df(self) -> pd.DataFrame:
"""
Return the market status (prices, volatility) in a DataFrame
"""
data = []
columns = ["Market", "Mid price", "Best bid", "Best ask", "Volatility"]
for market, market_info in self._market_infos.items():
mid_price = market_info.get_mid_price()
best_bid = self._exchange.get_price(market, False)
best_ask = self._exchange.get_price(market, True)
best_bid_pct = abs(best_bid - mid_price) / mid_price
best_ask_pct = (best_ask - mid_price) / mid_price
data.append([
market,
float(mid_price),
f"{best_bid_pct:.2%}",
f"{best_ask_pct:.2%}",
"" if self._volatility[market].is_nan() else f"{self._volatility[market]:.2%}",
])
df = pd.DataFrame(data=data, columns=columns).replace(np.nan, '', regex=True)
df.sort_values(by=["Market"], inplace=True)
return df
async def miner_status_df(self) -> pd.DataFrame:
"""
Return the miner status (payouts, rewards, liquidity, etc.) in a DataFrame
"""
data = []
g_sym = RateOracle.global_token_symbol
columns = ["Market", "Payout", "Reward/wk", "Liquidity", "Yield/yr", "Max spread"]
campaigns = await get_campaign_summary(self._exchange.display_name, list(self._market_infos.keys()))
for market, campaign in campaigns.items():
reward = await RateOracle.global_value(campaign.payout_asset, campaign.reward_per_wk)
data.append([
market,
campaign.payout_asset,
f"{g_sym}{reward:.0f}",
f"{g_sym}{campaign.liquidity_usd:.0f}",
f"{campaign.apy:.2%}",
f"{campaign.spread_max:.2%}%"
])
df = pd.DataFrame(data=data, columns=columns).replace(np.nan, '', regex=True)
df.sort_values(by=["Market"], inplace=True)
return df
async def format_status(self) -> str:
"""
Return the budget, market, miner and order statuses.
"""
if not self._ready_to_trade:
return "Market connectors are not ready."
lines = []
warning_lines = []
warning_lines.extend(self.network_warning(list(self._market_infos.values())))
budget_df = self.budget_status_df()
lines.extend(["", " Budget:"] + [" " + line for line in budget_df.to_string(index=False).split("\n")])
market_df = self.market_status_df()
lines.extend(["", " Markets:"] + [" " + line for line in market_df.to_string(index=False).split("\n")])
miner_df = await self.miner_status_df()
if not miner_df.empty:
lines.extend(["", " Miner:"] + [" " + line for line in miner_df.to_string(index=False).split("\n")])
# See if there are any open orders.
if len(self.active_orders) > 0:
df = await self.active_orders_df()
lines.extend(["", " Orders:"] + [" " + line for line in df.to_string(index=False).split("\n")])
else:
lines.extend(["", " No active maker orders."])
warning_lines.extend(self.balance_warning(list(self._market_infos.values())))
if len(warning_lines) > 0:
lines.extend(["", "*** WARNINGS ***"] + warning_lines)
return "\n".join(lines)
def start(self, clock: Clock, timestamp: float):
restored_orders = self._exchange.limit_orders
for order in restored_orders:
self._exchange.cancel(order.trading_pair, order.client_order_id)
def stop(self, clock: Clock):
pass
def create_base_proposals(self):
"""
Each tick this strategy creates a set of proposals based on the market_info and the parameters from the
constructor.
"""
proposals = []
for market, market_info in self._market_infos.items():
spread = self._spread
if not self._volatility[market].is_nan():
# volatility applies only when it is higher than the spread setting.
spread = max(spread, self._volatility[market] * self._volatility_to_spread_multiplier)
if self._max_spread > s_decimal_zero:
spread = min(spread, self._max_spread)
mid_price = market_info.get_mid_price()
buy_price = mid_price * (Decimal("1") - spread)
buy_price = self._exchange.quantize_order_price(market, buy_price)
buy_size = self.base_order_size(market, buy_price)
sell_price = mid_price * (Decimal("1") + spread)
sell_price = self._exchange.quantize_order_price(market, sell_price)
sell_size = self.base_order_size(market, sell_price)
proposals.append(Proposal(market, PriceSize(buy_price, buy_size), PriceSize(sell_price, sell_size)))
return proposals
def total_port_value_in_token(self) -> Decimal:
"""
Total portfolio value in self._token amount
"""
all_bals = self.adjusted_available_balances()
port_value = all_bals.get(self._token, s_decimal_zero)
for market, market_info in self._market_infos.items():
base, quote = market.split("-")
if self.is_token_a_quote_token():
port_value += all_bals[base] * market_info.get_mid_price()
else:
port_value += all_bals[quote] / market_info.get_mid_price()
return port_value
def create_budget_allocation(self):
"""
Create buy and sell budgets for every market
"""
self._sell_budgets = {m: s_decimal_zero for m in self._market_infos}
self._buy_budgets = {m: s_decimal_zero for m in self._market_infos}
portfolio_value = self.total_port_value_in_token()
market_portion = portfolio_value / len(self._market_infos)
balances = self.adjusted_available_balances()
for market, market_info in self._market_infos.items():
base, quote = market.split("-")
if self.is_token_a_quote_token():
self._sell_budgets[market] = balances[base]
buy_budget = market_portion - (balances[base] * market_info.get_mid_price())
if buy_budget > s_decimal_zero:
self._buy_budgets[market] = buy_budget
else:
self._buy_budgets[market] = balances[quote]
sell_budget = market_portion - (balances[quote] / market_info.get_mid_price())
if sell_budget > s_decimal_zero:
self._sell_budgets[market] = sell_budget
def base_order_size(self, trading_pair: str, price: Decimal = s_decimal_zero):
base, quote = trading_pair.split("-")
if self._token == base:
return self._order_amount
if price == s_decimal_zero:
price = self._market_infos[trading_pair].get_mid_price()
return self._order_amount / price
def apply_budget_constraint(self, proposals: List[Proposal]):
balances = self._token_balances.copy()
for proposal in proposals:
if balances[proposal.base()] < proposal.sell.size:
proposal.sell.size = balances[proposal.base()]
proposal.sell.size = self._exchange.quantize_order_amount(proposal.market, proposal.sell.size)
balances[proposal.base()] -= proposal.sell.size
quote_size = proposal.buy.size * proposal.buy.price
quote_size = balances[proposal.quote()] if balances[proposal.quote()] < quote_size else quote_size
buy_fee = estimate_fee(self._exchange.name, True)
buy_size = quote_size / (proposal.buy.price * (Decimal("1") + buy_fee.percent))
proposal.buy.size = self._exchange.quantize_order_amount(proposal.market, buy_size)
balances[proposal.quote()] -= quote_size
def is_within_tolerance(self, cur_orders: List[LimitOrder], proposal: Proposal):
"""
        Return False if an existing buy/sell order has no corresponding proposal size, or if the difference between
        a proposed price and the current order price exceeds the order refresh tolerance; otherwise return True.
"""
cur_buy = [o for o in cur_orders if o.is_buy]
cur_sell = [o for o in cur_orders if not o.is_buy]
if (cur_buy and proposal.buy.size <= 0) or (cur_sell and proposal.sell.size <= 0):
return False
if cur_buy and \
abs(proposal.buy.price - cur_buy[0].price) / cur_buy[0].price > self._order_refresh_tolerance_pct:
return False
if cur_sell and \
abs(proposal.sell.price - cur_sell[0].price) / cur_sell[0].price > self._order_refresh_tolerance_pct:
return False
return True
def cancel_active_orders(self, proposals: List[Proposal]):
"""
Cancel any orders that have an order age greater than self._max_order_age or if orders are not within tolerance
"""
for proposal in proposals:
to_cancel = False
cur_orders = [o for o in self.active_orders if o.trading_pair == proposal.market]
if cur_orders and any(order_age(o) > self._max_order_age for o in cur_orders):
to_cancel = True
elif self._refresh_times[proposal.market] <= self.current_timestamp and \
cur_orders and not self.is_within_tolerance(cur_orders, proposal):
to_cancel = True
if to_cancel:
for order in cur_orders:
self.cancel_order(self._market_infos[proposal.market], order.client_order_id)
# To place new order on the next tick
self._refresh_times[order.trading_pair] = self.current_timestamp + 0.1
def execute_orders_proposal(self, proposals: List[Proposal]):
"""
        Execute a proposal when its market has no active orders and its refresh time has passed
        (current timestamp >= refresh timestamp), then schedule the next refresh.
"""
for proposal in proposals:
cur_orders = [o for o in self.active_orders if o.trading_pair == proposal.market]
if cur_orders or self._refresh_times[proposal.market] > self.current_timestamp:
continue
mid_price = self._market_infos[proposal.market].get_mid_price()
spread = s_decimal_zero
if proposal.buy.size > 0:
spread = abs(proposal.buy.price - mid_price) / mid_price
self.logger().info(f"({proposal.market}) Creating a bid order {proposal.buy} value: "
f"{proposal.buy.size * proposal.buy.price:.2f} {proposal.quote()} spread: "
f"{spread:.2%}")
self.buy_with_specific_market(
self._market_infos[proposal.market],
proposal.buy.size,
order_type=OrderType.LIMIT_MAKER,
price=proposal.buy.price
)
if proposal.sell.size > 0:
spread = abs(proposal.sell.price - mid_price) / mid_price
self.logger().info(f"({proposal.market}) Creating an ask order at {proposal.sell} value: "
f"{proposal.sell.size * proposal.sell.price:.2f} {proposal.quote()} spread: "
f"{spread:.2%}")
self.sell_with_specific_market(
self._market_infos[proposal.market],
proposal.sell.size,
order_type=OrderType.LIMIT_MAKER,
price=proposal.sell.price
)
if proposal.buy.size > 0 or proposal.sell.size > 0:
if not self._volatility[proposal.market].is_nan() and spread > self._spread:
adjusted_vol = self._volatility[proposal.market] * self._volatility_to_spread_multiplier
if adjusted_vol > self._spread:
self.logger().info(f"({proposal.market}) Spread is widened to {spread:.2%} due to high "
f"market volatility")
self._refresh_times[proposal.market] = self.current_timestamp + self._order_refresh_time
def is_token_a_quote_token(self):
"""
Check if self._token is a quote token
"""
quotes = self.all_quote_tokens()
if len(quotes) == 1 and self._token in quotes:
return True
return False
def all_base_tokens(self) -> Set[str]:
"""
Get the base token (left-hand side) from all markets in this strategy
"""
tokens = set()
for market in self._market_infos:
tokens.add(market.split("-")[0])
return tokens
def all_quote_tokens(self) -> Set[str]:
"""
Get the quote token (right-hand side) from all markets in this strategy
"""
tokens = set()
for market in self._market_infos:
tokens.add(market.split("-")[1])
return tokens
def all_tokens(self) -> Set[str]:
"""
        Return a set of all tokens involved in this strategy (base and quote)
"""
tokens = set()
for market in self._market_infos:
tokens.update(market.split("-"))
return tokens
def adjusted_available_balances(self) -> Dict[str, Decimal]:
"""
        Calculates all available balances, accounting for the amounts attributed to active orders and reserved balances.
:return: a dictionary of token and its available balance
"""
tokens = self.all_tokens()
adjusted_bals = {t: s_decimal_zero for t in tokens}
total_bals = {t: s_decimal_zero for t in tokens}
total_bals.update(self._exchange.get_all_balances())
for token in tokens:
adjusted_bals[token] = self._exchange.get_available_balance(token)
for order in self.active_orders:
base, quote = order.trading_pair.split("-")
if order.is_buy:
adjusted_bals[quote] += order.quantity * order.price
else:
adjusted_bals[base] += order.quantity
return adjusted_bals
def apply_inventory_skew(self, proposals: List[Proposal]):
"""
Apply an inventory split between the quote and base asset
"""
for proposal in proposals:
buy_budget = self._buy_budgets[proposal.market]
sell_budget = self._sell_budgets[proposal.market]
mid_price = self._market_infos[proposal.market].get_mid_price()
total_order_size = proposal.sell.size + proposal.buy.size
bid_ask_ratios = calculate_bid_ask_ratios_from_base_asset_ratio(
float(sell_budget),
float(buy_budget),
float(mid_price),
float(self._target_base_pct),
float(total_order_size * self._inventory_range_multiplier)
)
proposal.buy.size *= Decimal(bid_ask_ratios.bid_ratio)
proposal.sell.size *= Decimal(bid_ask_ratios.ask_ratio)
def did_fill_order(self, event):
"""
        Handle an order fill event: log it, notify the hummingbot application, and update the buy/sell budgets.
"""
order_id = event.order_id
market_info = self.order_tracker.get_shadow_market_pair_from_order_id(order_id)
if market_info is not None:
if event.trade_type is TradeType.BUY:
msg = f"({market_info.trading_pair}) Maker BUY order (price: {event.price}) of {event.amount} " \
f"{market_info.base_asset} is filled."
self.log_with_clock(logging.INFO, msg)
self.notify_hb_app_with_timestamp(msg)
self._buy_budgets[market_info.trading_pair] -= (event.amount * event.price)
self._sell_budgets[market_info.trading_pair] += event.amount
else:
msg = f"({market_info.trading_pair}) Maker SELL order (price: {event.price}) of {event.amount} " \
f"{market_info.base_asset} is filled."
self.log_with_clock(logging.INFO, msg)
self.notify_hb_app_with_timestamp(msg)
self._sell_budgets[market_info.trading_pair] -= event.amount
self._buy_budgets[market_info.trading_pair] += (event.amount * event.price)
def update_mid_prices(self):
"""
Query asset markets for mid price
"""
for market in self._market_infos:
mid_price = self._market_infos[market].get_mid_price()
self._mid_prices[market].append(mid_price)
# To avoid memory leak, we store only the last part of the list needed for volatility calculation
max_len = self._volatility_interval * self._avg_volatility_period
self._mid_prices[market] = self._mid_prices[market][-1 * max_len:]
def update_volatility(self):
"""
Update volatility data from the market
"""
self._volatility = {market: s_decimal_nan for market in self._market_infos}
for market, mid_prices in self._mid_prices.items():
last_index = len(mid_prices) - 1
atr = []
first_index = last_index - (self._volatility_interval * self._avg_volatility_period)
first_index = max(first_index, 0)
for i in range(last_index, first_index, self._volatility_interval * -1):
prices = mid_prices[i - self._volatility_interval + 1: i + 1]
if not prices:
break
atr.append((max(prices) - min(prices)) / min(prices))
if atr:
self._volatility[market] = mean(atr)
if self._last_vol_reported < self.current_timestamp - self._volatility_interval:
for market, vol in self._volatility.items():
if not vol.is_nan():
self.logger().info(f"{market} volatility: {vol:.2%}")
self._last_vol_reported = self.current_timestamp
def notify_hb_app(self, msg: str):
"""
Send a message to the hummingbot application
"""
if self._hb_app_notification:
super().notify_hb_app(msg)
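
# Standalone sketch (added for illustration, not part of the strategy class):
# it mirrors the interval-range volatility logic used in update_volatility()
# above - for each window of `interval` mid prices take (max - min) / min and
# average the last `periods` of those ranges.
def interval_volatility_example(mid_prices: List[Decimal],
                                interval: int,
                                periods: int) -> Decimal:
    last_index = len(mid_prices) - 1
    first_index = max(last_index - (interval * periods), 0)
    ranges = []
    for i in range(last_index, first_index, interval * -1):
        prices = mid_prices[i - interval + 1: i + 1]
        if not prices:
            break
        ranges.append((max(prices) - min(prices)) / min(prices))
    return mean(ranges) if ranges else s_decimal_nan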
| 46.288496
| 119
| 0.61622
|
4e476557f705d88fc4424f85b7e9674bad23cea7
| 3,424
|
py
|
Python
|
chowtest/__init__.py
|
jkclem/chowtest
|
f73f185dadcbb14e567ce65f942e6c8bdb432d4d
|
[
"MIT"
] | 8
|
2019-09-25T10:32:27.000Z
|
2022-03-29T17:08:07.000Z
|
chowtest/__init__.py
|
jkclem/chowtest
|
f73f185dadcbb14e567ce65f942e6c8bdb432d4d
|
[
"MIT"
] | 2
|
2021-03-03T13:19:28.000Z
|
2022-03-21T18:04:27.000Z
|
chowtest/__init__.py
|
jkclem/chowtest
|
f73f185dadcbb14e567ce65f942e6c8bdb432d4d
|
[
"MIT"
] | 5
|
2020-02-19T18:08:09.000Z
|
2022-03-07T14:52:27.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 12:02:06 2019
@author: jkcle
"""
# defines a function to get performance information about a linear regression using sklearn
def linear_residuals(X, y):
# imports pandas as pd
import pandas as pd
# imports numpy as np
import numpy as np
# imports the linear regression function from sklearn as lr
from sklearn.linear_model import LinearRegression as lr
# fits the linear model
model = lr().fit(X, y)
# creates a dataframe with the predicted y in a column called y_hat
summary_result = pd.DataFrame(columns = ['y_hat'])
yhat_list = [float(i[0]) for i in np.ndarray.tolist(model.predict(X))]
summary_result['y_hat'] = yhat_list
# saves the actual y values in the y_actual column
summary_result['y_actual'] = y.values
# calculates the residuals
summary_result['residuals'] = summary_result.y_actual - summary_result.y_hat
# squares the residuals
summary_result['residuals_sq'] = summary_result.residuals ** 2
return(summary_result)
# defines a function to return the sum of squares of a linear regression, where X is a
# pandas dataframe of the independent variables and y is a pandas dataframe of the dependent
# variable
def calculate_RSS(X, y):
# calls the linear_residual function
resid_data = linear_residuals(X, y)
    # calculates the sum of squared residuals
rss = resid_data.residuals_sq.sum()
# returns the sum of squared residuals
return(rss)
# defines a function to return the Chow statistic and the p-value from a Chow Test
def ChowTest(X, y, last_index_in_model_1, first_index_in_model_2):
# gets the RSS for the entire period
rss_pooled = calculate_RSS(X, y)
# splits the X and y dataframes and gets the rows from the first row in the dataframe
# to the last row in the model 1 testing period and then calculates the RSS
X1 = X.loc[:last_index_in_model_1]
y1 = y.loc[:last_index_in_model_1]
rss1 = calculate_RSS(X1, y1)
# splits the X and y dataframes and gets the rows from the first row in the model 2
# testing period to the last row in the dataframe and then calculates the RSS
X2 = X.loc[first_index_in_model_2:]
y2 = y.loc[first_index_in_model_2:]
rss2 = calculate_RSS(X2, y2)
# gets the number of independent variables, plus 1 for the constant in the regression
k = X.shape[1] + 1
# gets the number of observations in the first period
N1 = X1.shape[0]
# gets the number of observations in the second period
N2 = X2.shape[0]
# calculates the numerator of the Chow Statistic
numerator = (rss_pooled - (rss1 + rss2)) / k
# calculates the denominator of the Chow Statistic
denominator = (rss1 + rss2) / (N1 + N2 - 2 * k)
# calculates the Chow Statistic
Chow_Stat = numerator / denominator
# Chow statistics are distributed in a F-distribution with k and N1 + N2 - 2k degrees of
# freedom
from scipy.stats import f
    # calculates the p-value by subtracting the cumulative probability at the Chow
    # statistic (from an F-distribution with k and N1 + N2 - 2k degrees of freedom) from 1
    p_value = 1 - f.cdf(Chow_Stat, dfn = k, dfd = (N1 + N2 - 2 * k))
    # saves the Chow_Stat and p_value in a tuple
result = (Chow_Stat, p_value)
    # returns the Chow statistic and the p-value
return(result)
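
# Illustrative usage with made-up data (an added example, not part of the original
# module): a series whose slope changes half way through the sample should give a
# small p-value, i.e. evidence of a structural break.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(0)
    x = np.arange(100)
    # slope changes from 1 to 3 at observation 50
    y_vals = np.where(x < 50, 1.0 * x, 3.0 * x - 100.0) + rng.normal(0, 5, size=100)
    X = pd.DataFrame({'x': x})
    y = pd.DataFrame({'y': y_vals})
    chow_stat, p_value = ChowTest(X, y, last_index_in_model_1=49, first_index_in_model_2=50)
    print(chow_stat, p_value)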
| 35.298969
| 92
| 0.688084
|
a550a89114635dd13627a919d82726511c31d624
| 23,535
|
py
|
Python
|
yapf/yapflib/style.py
|
hugo-ricateau/yapf
|
033bc1421a940b1fde8b91311f477928935a3f05
|
[
"Apache-2.0"
] | null | null | null |
yapf/yapflib/style.py
|
hugo-ricateau/yapf
|
033bc1421a940b1fde8b91311f477928935a3f05
|
[
"Apache-2.0"
] | null | null | null |
yapf/yapflib/style.py
|
hugo-ricateau/yapf
|
033bc1421a940b1fde8b91311f477928935a3f05
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python formatting style settings."""
import os
import re
import textwrap
from yapf.yapflib import errors
from yapf.yapflib import py3compat
class StyleConfigError(errors.YapfError):
"""Raised when there's a problem reading the style configuration."""
pass
def Get(setting_name):
"""Get a style setting."""
return _style[setting_name]
def Help():
"""Return dict mapping style names to help strings."""
return _STYLE_HELP
def SetGlobalStyle(style):
"""Set a style dict."""
global _style
global _GLOBAL_STYLE_FACTORY
factory = _GetStyleFactory(style)
if factory:
_GLOBAL_STYLE_FACTORY = factory
_style = style
_STYLE_HELP = dict(
ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=textwrap.dedent("""\
Align closing bracket with visual indentation."""),
ALLOW_MULTILINE_LAMBDAS=textwrap.dedent("""\
Allow lambdas to be formatted on more than one line."""),
ALLOW_MULTILINE_DICTIONARY_KEYS=textwrap.dedent("""\
Allow dictionary keys to exist on multiple lines. For example:
x = {
('this is the first element of a tuple',
'this is the second element of a tuple'):
value,
}"""),
ALLOW_SPLIT_BEFORE_DICT_VALUE=textwrap.dedent("""\
Allow splits before the dictionary value."""),
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=textwrap.dedent("""\
Insert a blank line before a 'def' or 'class' immediately nested
within another 'def' or 'class'. For example:
class Foo:
# <------ this blank line
def method():
..."""),
BLANK_LINE_BEFORE_CLASS_DOCSTRING=textwrap.dedent("""\
Insert a blank line before a class-level docstring."""),
BLANK_LINE_BEFORE_MODULE_DOCSTRING=textwrap.dedent("""\
Insert a blank line before a module docstring."""),
BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=textwrap.dedent("""\
Number of blank lines surrounding top-level function and class
definitions."""),
COALESCE_BRACKETS=textwrap.dedent("""\
Do not split consecutive brackets. Only relevant when
dedent_closing_brackets is set. For example:
call_func_that_takes_a_dict(
{
'key1': 'value1',
'key2': 'value2',
}
)
would reformat to:
call_func_that_takes_a_dict({
'key1': 'value1',
'key2': 'value2',
})"""),
COLUMN_LIMIT=textwrap.dedent("""\
The column limit."""),
CONTINUATION_ALIGN_STYLE=textwrap.dedent("""\
The style for continuation alignment. Possible values are:
- SPACE: Use spaces for continuation alignment. This is default behavior.
- FIXED: Use fixed number (CONTINUATION_INDENT_WIDTH) of columns
(ie: CONTINUATION_INDENT_WIDTH/INDENT_WIDTH tabs) for continuation
alignment.
- LESS: Slightly left if cannot vertically align continuation lines with
indent characters.
- VALIGN-RIGHT: Vertically align continuation lines with indent
characters. Slightly right (one more indent character) if cannot
vertically align continuation lines with indent characters.
      The FIXED and VALIGN-RIGHT options are only available when USE_TABS is
      enabled."""),
CONTINUATION_INDENT_WIDTH=textwrap.dedent("""\
Indent width used for line continuations."""),
DEDENT_CLOSING_BRACKETS=textwrap.dedent("""\
Put closing brackets on a separate line, dedented, if the bracketed
expression can't fit in a single line. Applies to all kinds of brackets,
including function definitions and calls. For example:
config = {
'key1': 'value1',
'key2': 'value2',
} # <--- this bracket is dedented and on a separate line
time_series = self.remote_client.query_entity_counters(
entity='dev3246.region1',
key='dns.query_latency_tcp',
transform=Transformation.AVERAGE(window=timedelta(seconds=60)),
start_ts=now()-timedelta(days=3),
end_ts=now(),
) # <--- this bracket is dedented and on a separate line"""),
DISABLE_ENDING_COMMA_HEURISTIC=textwrap.dedent("""\
Disable the heuristic which places each list element on a separate line
if the list is comma-terminated."""),
EACH_DICT_ENTRY_ON_SEPARATE_LINE=textwrap.dedent("""\
Place each dictionary entry onto its own line."""),
I18N_COMMENT=textwrap.dedent("""\
The regex for an i18n comment. The presence of this comment stops
reformatting of that line, because the comments are required to be
next to the string they translate."""),
I18N_FUNCTION_CALL=textwrap.dedent("""\
The i18n function call names. The presence of this function stops
      reformatting on that line, because the string it has cannot be moved
away from the i18n comment."""),
INDENT_DICTIONARY_VALUE=textwrap.dedent("""\
Indent the dictionary value if it cannot fit on the same line as the
dictionary key. For example:
config = {
'key1':
'value1',
'key2': value1 +
value2,
}"""),
INDENT_WIDTH=textwrap.dedent("""\
The number of columns to use for indentation."""),
INDENT_BLANK_LINES=textwrap.dedent("""\
Indent blank lines."""),
JOIN_MULTIPLE_LINES=textwrap.dedent("""\
Join short lines into one line. E.g., single line 'if' statements."""),
NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=textwrap.dedent("""\
Do not include spaces around selected binary operators. For example:
1 + 2 * 3 - 4 / 5
will be formatted as follows when configured with *,/:
1 + 2*3 - 4/5
"""),
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=textwrap.dedent("""\
Insert a space between the ending comma and closing bracket of a list,
etc."""),
SPACES_AROUND_POWER_OPERATOR=textwrap.dedent("""\
Use spaces around the power operator."""),
SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=textwrap.dedent("""\
Use spaces around default or named assigns."""),
SPACES_BEFORE_COMMENT=textwrap.dedent("""\
The number of spaces required before a trailing comment."""),
SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=textwrap.dedent("""\
Split before arguments if the argument list is terminated by a
comma."""),
SPLIT_ALL_COMMA_SEPARATED_VALUES=textwrap.dedent("""\
Split before arguments"""),
SPLIT_BEFORE_BITWISE_OPERATOR=textwrap.dedent("""\
Set to True to prefer splitting before '&', '|' or '^' rather than
after."""),
SPLIT_BEFORE_CLOSING_BRACKET=textwrap.dedent("""\
Split before the closing bracket if a list or dict literal doesn't fit on
a single line."""),
SPLIT_BEFORE_DICT_SET_GENERATOR=textwrap.dedent("""\
Split before a dictionary or set generator (comp_for). For example, note
the split before the 'for':
foo = {
variable: 'Hello world, have a nice day!'
for variable in bar if variable != 42
}"""),
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=textwrap.dedent("""\
Split after the opening paren which surrounds an expression if it doesn't
fit on a single line.
"""),
SPLIT_BEFORE_FIRST_ARGUMENT=textwrap.dedent("""\
If an argument / parameter list is going to be split, then split before
the first argument."""),
SPLIT_BEFORE_LOGICAL_OPERATOR=textwrap.dedent("""\
Set to True to prefer splitting before 'and' or 'or' rather than
after."""),
SPLIT_BEFORE_NAMED_ASSIGNS=textwrap.dedent("""\
Split named assignments onto individual lines."""),
SPLIT_COMPLEX_COMPREHENSION=textwrap.dedent("""\
Set to True to split list comprehensions and generators that have
non-trivial expressions and multiple clauses before each of these
clauses. For example:
result = [
a_long_var + 100 for a_long_var in xrange(1000)
if a_long_var % 10]
would reformat to something like:
result = [
a_long_var + 100
for a_long_var in xrange(1000)
if a_long_var % 10]
"""),
SPLIT_PENALTY_AFTER_OPENING_BRACKET=textwrap.dedent("""\
The penalty for splitting right after the opening bracket."""),
SPLIT_PENALTY_AFTER_UNARY_OPERATOR=textwrap.dedent("""\
The penalty for splitting the line after a unary operator."""),
SPLIT_PENALTY_BEFORE_IF_EXPR=textwrap.dedent("""\
The penalty for splitting right before an if expression."""),
SPLIT_PENALTY_BITWISE_OPERATOR=textwrap.dedent("""\
The penalty of splitting the line around the '&', '|', and '^'
operators."""),
SPLIT_PENALTY_COMPREHENSION=textwrap.dedent("""\
The penalty for splitting a list comprehension or generator
expression."""),
SPLIT_PENALTY_EXCESS_CHARACTER=textwrap.dedent("""\
The penalty for characters over the column limit."""),
SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=textwrap.dedent("""\
The penalty incurred by adding a line split to the unwrapped line. The
more line splits added the higher the penalty."""),
SPLIT_PENALTY_IMPORT_NAMES=textwrap.dedent("""\
The penalty of splitting a list of "import as" names. For example:
from a_very_long_or_indented_module_name_yada_yad import (long_argument_1,
long_argument_2,
long_argument_3)
would reformat to something like:
from a_very_long_or_indented_module_name_yada_yad import (
long_argument_1, long_argument_2, long_argument_3)
"""),
SPLIT_PENALTY_LOGICAL_OPERATOR=textwrap.dedent("""\
The penalty of splitting the line around the 'and' and 'or'
operators."""),
USE_TABS=textwrap.dedent("""\
Use the Tab character for indentation."""),
# BASED_ON_STYLE='Which predefined style this style is based on',
)
def CreatePEP8Style():
return dict(
ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=True,
ALLOW_MULTILINE_LAMBDAS=False,
ALLOW_MULTILINE_DICTIONARY_KEYS=False,
ALLOW_SPLIT_BEFORE_DICT_VALUE=True,
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=False,
BLANK_LINE_BEFORE_CLASS_DOCSTRING=False,
BLANK_LINE_BEFORE_MODULE_DOCSTRING=False,
BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=2,
COALESCE_BRACKETS=False,
COLUMN_LIMIT=79,
CONTINUATION_ALIGN_STYLE='SPACE',
CONTINUATION_INDENT_WIDTH=4,
DEDENT_CLOSING_BRACKETS=False,
DISABLE_ENDING_COMMA_HEURISTIC=False,
EACH_DICT_ENTRY_ON_SEPARATE_LINE=True,
I18N_COMMENT='',
I18N_FUNCTION_CALL='',
INDENT_DICTIONARY_VALUE=False,
INDENT_WIDTH=4,
INDENT_BLANK_LINES=False,
JOIN_MULTIPLE_LINES=True,
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=True,
SPACES_AROUND_POWER_OPERATOR=False,
NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=set(),
SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=False,
SPACES_BEFORE_COMMENT=2,
SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=False,
SPLIT_ALL_COMMA_SEPARATED_VALUES=False,
SPLIT_BEFORE_BITWISE_OPERATOR=True,
SPLIT_BEFORE_CLOSING_BRACKET=True,
SPLIT_BEFORE_DICT_SET_GENERATOR=True,
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=False,
SPLIT_BEFORE_FIRST_ARGUMENT=False,
SPLIT_BEFORE_LOGICAL_OPERATOR=True,
SPLIT_BEFORE_NAMED_ASSIGNS=True,
SPLIT_COMPLEX_COMPREHENSION=False,
SPLIT_PENALTY_AFTER_OPENING_BRACKET=30,
SPLIT_PENALTY_AFTER_UNARY_OPERATOR=10000,
SPLIT_PENALTY_BEFORE_IF_EXPR=0,
SPLIT_PENALTY_BITWISE_OPERATOR=300,
SPLIT_PENALTY_COMPREHENSION=80,
SPLIT_PENALTY_EXCESS_CHARACTER=4500,
SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=30,
SPLIT_PENALTY_IMPORT_NAMES=0,
SPLIT_PENALTY_LOGICAL_OPERATOR=300,
USE_TABS=False,
)
def CreateGoogleStyle():
style = CreatePEP8Style()
style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False
style['BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'] = True
style['COLUMN_LIMIT'] = 80
style['INDENT_WIDTH'] = 4
style['I18N_COMMENT'] = r'#\..*'
style['I18N_FUNCTION_CALL'] = ['N_', '_']
style['SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET'] = False
style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False
style['SPLIT_BEFORE_DICT_SET_GENERATOR'] = False
style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False
style['SPLIT_COMPLEX_COMPREHENSION'] = True
style['SPLIT_PENALTY_COMPREHENSION'] = 2100
return style
def CreateChromiumStyle():
style = CreateGoogleStyle()
style['ALLOW_MULTILINE_DICTIONARY_KEYS'] = True
style['INDENT_DICTIONARY_VALUE'] = True
style['INDENT_WIDTH'] = 2
style['JOIN_MULTIPLE_LINES'] = False
style['SPLIT_BEFORE_BITWISE_OPERATOR'] = True
style['SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN'] = True
return style
def CreateFacebookStyle():
style = CreatePEP8Style()
style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False
style['COLUMN_LIMIT'] = 80
style['DEDENT_CLOSING_BRACKETS'] = True
style['INDENT_DICTIONARY_VALUE'] = True
style['JOIN_MULTIPLE_LINES'] = False
style['SPACES_BEFORE_COMMENT'] = 2
style['SPLIT_PENALTY_AFTER_OPENING_BRACKET'] = 0
style['SPLIT_PENALTY_BEFORE_IF_EXPR'] = 30
style['SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT'] = 30
style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False
style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False
return style
_STYLE_NAME_TO_FACTORY = dict(
pep8=CreatePEP8Style,
chromium=CreateChromiumStyle,
google=CreateGoogleStyle,
facebook=CreateFacebookStyle,
)
_DEFAULT_STYLE_TO_FACTORY = [
(CreateChromiumStyle(), CreateChromiumStyle),
(CreateFacebookStyle(), CreateFacebookStyle),
(CreateGoogleStyle(), CreateGoogleStyle),
(CreatePEP8Style(), CreatePEP8Style),
]
def _GetStyleFactory(style):
for def_style, factory in _DEFAULT_STYLE_TO_FACTORY:
if style == def_style:
return factory
return None
def _ContinuationAlignStyleStringConverter(s):
"""Option value converter for a continuation align style string."""
accepted_styles = ('SPACE', 'FIXED', 'VALIGN-RIGHT')
if s:
r = s.upper()
if r not in accepted_styles:
raise ValueError('unknown continuation align style: %r' % (s,))
else:
r = accepted_styles[0]
return r
def _StringListConverter(s):
"""Option value converter for a comma-separated list of strings."""
return [part.strip() for part in s.split(',')]
def _StringSetConverter(s):
"""Option value converter for a comma-separated set of strings."""
return set(part.strip() for part in s.split(','))
def _BoolConverter(s):
"""Option value converter for a boolean."""
return py3compat.CONFIGPARSER_BOOLEAN_STATES[s.lower()]
# Different style options need to have their values interpreted differently when
# read from the config file. This dict maps an option name to a "converter"
# function that accepts the string read for the option's value from the file and
# returns it wrapped in the actual Python type that's going to be meaningful to
# yapf.
#
# Note: this dict has to map all the supported style options.
_STYLE_OPTION_VALUE_CONVERTER = dict(
ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=_BoolConverter,
ALLOW_MULTILINE_LAMBDAS=_BoolConverter,
ALLOW_MULTILINE_DICTIONARY_KEYS=_BoolConverter,
ALLOW_SPLIT_BEFORE_DICT_VALUE=_BoolConverter,
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=_BoolConverter,
BLANK_LINE_BEFORE_CLASS_DOCSTRING=_BoolConverter,
BLANK_LINE_BEFORE_MODULE_DOCSTRING=_BoolConverter,
BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=int,
COALESCE_BRACKETS=_BoolConverter,
COLUMN_LIMIT=int,
CONTINUATION_ALIGN_STYLE=_ContinuationAlignStyleStringConverter,
CONTINUATION_INDENT_WIDTH=int,
DEDENT_CLOSING_BRACKETS=_BoolConverter,
DISABLE_ENDING_COMMA_HEURISTIC=_BoolConverter,
EACH_DICT_ENTRY_ON_SEPARATE_LINE=_BoolConverter,
I18N_COMMENT=str,
I18N_FUNCTION_CALL=_StringListConverter,
INDENT_DICTIONARY_VALUE=_BoolConverter,
INDENT_WIDTH=int,
INDENT_BLANK_LINES=_BoolConverter,
JOIN_MULTIPLE_LINES=_BoolConverter,
NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=_StringSetConverter,
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=_BoolConverter,
SPACES_AROUND_POWER_OPERATOR=_BoolConverter,
SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=_BoolConverter,
SPACES_BEFORE_COMMENT=int,
SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=_BoolConverter,
SPLIT_ALL_COMMA_SEPARATED_VALUES=_BoolConverter,
SPLIT_BEFORE_BITWISE_OPERATOR=_BoolConverter,
SPLIT_BEFORE_CLOSING_BRACKET=_BoolConverter,
SPLIT_BEFORE_DICT_SET_GENERATOR=_BoolConverter,
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=_BoolConverter,
SPLIT_BEFORE_FIRST_ARGUMENT=_BoolConverter,
SPLIT_BEFORE_LOGICAL_OPERATOR=_BoolConverter,
SPLIT_BEFORE_NAMED_ASSIGNS=_BoolConverter,
SPLIT_COMPLEX_COMPREHENSION=_BoolConverter,
SPLIT_PENALTY_AFTER_OPENING_BRACKET=int,
SPLIT_PENALTY_AFTER_UNARY_OPERATOR=int,
SPLIT_PENALTY_BEFORE_IF_EXPR=int,
SPLIT_PENALTY_BITWISE_OPERATOR=int,
SPLIT_PENALTY_COMPREHENSION=int,
SPLIT_PENALTY_EXCESS_CHARACTER=int,
SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=int,
SPLIT_PENALTY_IMPORT_NAMES=int,
SPLIT_PENALTY_LOGICAL_OPERATOR=int,
USE_TABS=_BoolConverter,
)
def CreateStyleFromConfig(style_config):
"""Create a style dict from the given config.
Arguments:
style_config: either a style name or a file name. The file is expected to
contain settings. It can have a special BASED_ON_STYLE setting naming the
style which it derives from. If no such setting is found, it derives from
the default style. When style_config is None, the _GLOBAL_STYLE_FACTORY
config is created.
Returns:
A style dict.
Raises:
StyleConfigError: if an unknown style option was encountered.
"""
def GlobalStyles():
for style, _ in _DEFAULT_STYLE_TO_FACTORY:
yield style
def_style = False
if style_config is None:
for style in GlobalStyles():
if _style == style:
def_style = True
break
if not def_style:
return _style
return _GLOBAL_STYLE_FACTORY()
if isinstance(style_config, dict):
config = _CreateConfigParserFromConfigDict(style_config)
elif isinstance(style_config, py3compat.basestring):
style_factory = _STYLE_NAME_TO_FACTORY.get(style_config.lower())
if style_factory is not None:
return style_factory()
if style_config.startswith('{'):
# Most likely a style specification from the command line.
config = _CreateConfigParserFromConfigString(style_config)
else:
# Unknown config name: assume it's a file name then.
config = _CreateConfigParserFromConfigFile(style_config)
return _CreateStyleFromConfigParser(config)
def _CreateConfigParserFromConfigDict(config_dict):
config = py3compat.ConfigParser()
config.add_section('style')
for key, value in config_dict.items():
config.set('style', key, str(value))
return config
def _CreateConfigParserFromConfigString(config_string):
"""Given a config string from the command line, return a config parser."""
if config_string[0] != '{' or config_string[-1] != '}':
raise StyleConfigError(
"Invalid style dict syntax: '{}'.".format(config_string))
config = py3compat.ConfigParser()
config.add_section('style')
for key, value in re.findall(r'([a-zA-Z0-9_]+)\s*[:=]\s*([a-zA-Z0-9_]+)',
config_string):
config.set('style', key, value)
return config
def _CreateConfigParserFromConfigFile(config_filename):
"""Read the file and return a ConfigParser object."""
if not os.path.exists(config_filename):
# Provide a more meaningful error here.
raise StyleConfigError(
'"{0}" is not a valid style or file path'.format(config_filename))
with open(config_filename) as style_file:
config = py3compat.ConfigParser()
config.read_file(style_file)
if config_filename.endswith(SETUP_CONFIG):
if not config.has_section('yapf'):
raise StyleConfigError(
'Unable to find section [yapf] in {0}'.format(config_filename))
elif config_filename.endswith(LOCAL_STYLE):
if not config.has_section('style'):
raise StyleConfigError(
'Unable to find section [style] in {0}'.format(config_filename))
else:
if not config.has_section('style'):
raise StyleConfigError(
'Unable to find section [style] in {0}'.format(config_filename))
return config
def _CreateStyleFromConfigParser(config):
"""Create a style dict from a configuration file.
Arguments:
config: a ConfigParser object.
Returns:
A style dict.
Raises:
StyleConfigError: if an unknown style option was encountered.
"""
# Initialize the base style.
section = 'yapf' if config.has_section('yapf') else 'style'
if config.has_option('style', 'based_on_style'):
based_on = config.get('style', 'based_on_style').lower()
base_style = _STYLE_NAME_TO_FACTORY[based_on]()
elif config.has_option('yapf', 'based_on_style'):
based_on = config.get('yapf', 'based_on_style').lower()
base_style = _STYLE_NAME_TO_FACTORY[based_on]()
else:
base_style = _GLOBAL_STYLE_FACTORY()
# Read all options specified in the file and update the style.
for option, value in config.items(section):
if option.lower() == 'based_on_style':
# Now skip this one - we've already handled it and it's not one of the
# recognized style options.
continue
option = option.upper()
if option not in _STYLE_OPTION_VALUE_CONVERTER:
raise StyleConfigError('Unknown style option "{0}"'.format(option))
try:
base_style[option] = _STYLE_OPTION_VALUE_CONVERTER[option](value)
except ValueError:
raise StyleConfigError("'{}' is not a valid setting for {}.".format(
value, option))
return base_style
# The default style - used if yapf is invoked without specifically
# requesting a formatting style.
DEFAULT_STYLE = 'pep8'
DEFAULT_STYLE_FACTORY = CreatePEP8Style
_GLOBAL_STYLE_FACTORY = CreatePEP8Style
# The name of the file to use for global style definition.
GLOBAL_STYLE = (
os.path.join(
os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), 'yapf',
'style'))
# The name of the file to use for directory-local style definition.
LOCAL_STYLE = '.style.yapf'
# Alternative place for directory-local style definition. Style should be
# specified in the '[yapf]' section.
SETUP_CONFIG = 'setup.cfg'
# TODO(eliben): For now we're preserving the global presence of a style dict.
# Refactor this so that the style is passed around through yapf rather than
# being global.
_style = None
SetGlobalStyle(_GLOBAL_STYLE_FACTORY())
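
# Illustrative usage (an added sketch, not part of yapf itself): build a style
# from an inline dict, install it globally, and read one setting back.
if __name__ == '__main__':
  SetGlobalStyle(CreateStyleFromConfig({'based_on_style': 'pep8', 'indent_width': 2}))
  print(Get('INDENT_WIDTH'))  # -> 2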
| 37.959677
| 82
| 0.712896
|
386f6bb5cf7e2912e02d05346145e523c2fdc590
| 7,756
|
py
|
Python
|
bbarchivist/xmlutils.py
|
thurask/bbarchivist
|
637beec4d5251871dc96a801ec2058371bb1c092
|
[
"WTFPL"
] | 7
|
2015-12-12T14:28:57.000Z
|
2022-01-11T23:28:07.000Z
|
bbarchivist/xmlutils.py
|
thurask/bbarchivist
|
637beec4d5251871dc96a801ec2058371bb1c092
|
[
"WTFPL"
] | null | null | null |
bbarchivist/xmlutils.py
|
thurask/bbarchivist
|
637beec4d5251871dc96a801ec2058371bb1c092
|
[
"WTFPL"
] | 2
|
2017-05-07T18:55:08.000Z
|
2019-04-10T05:12:30.000Z
|
#!/usr/bin/env python3
"""This module is used for XML handling."""
import re # regexes
try:
from defusedxml import ElementTree # safer XML parsing
except (ImportError, AttributeError):
from xml.etree import ElementTree # XML parsing
__author__ = "Thurask"
__license__ = "WTFPL v2"
__copyright__ = "2018-2019 Thurask"
def cchecker_get_tags(roottext):
"""
Get country and carrier from XML.
:param roottext: XML text.
:type roottext: str
"""
root = ElementTree.fromstring(roottext)
for child in root:
if child.tag == "country":
country = child.get("name")
if child.tag == "carrier":
carrier = child.get("name")
return country, carrier
def prep_available_bundle(device, npc):
"""
Prepare bundle query XML.
:param device: Hexadecimal hardware ID.
:type device: str
:param npc: MCC + MNC (see `func:bbarchivist.networkutils.return_npc`)
:type npc: int
"""
query = '<?xml version="1.0" encoding="UTF-8"?><availableBundlesRequest version="1.0.0" authEchoTS="1366644680359"><deviceId><pin>0x2FFFFFB3</pin></deviceId><clientProperties><hardware><id>0x{0}</id><isBootROMSecure>true</isBootROMSecure></hardware><network><vendorId>0x0</vendorId><homeNPC>0x{1}</homeNPC><currentNPC>0x{1}</currentNPC></network><software><currentLocale>en_US</currentLocale><legalLocale>en_US</legalLocale><osVersion>10.0.0.0</osVersion><radioVersion>10.0.0.0</radioVersion></software></clientProperties><updateDirectives><bundleVersionFilter></bundleVersionFilter></updateDirectives></availableBundlesRequest>'.format(device, npc)
return query
def parse_available_bundle(roottext):
"""
Get bundles from XML.
:param roottext: XML text.
:type roottext: str
"""
root = ElementTree.fromstring(roottext)
package = root.find('./data/content')
bundlelist = [child.attrib["version"] for child in package]
return bundlelist
def carrier_swver_get(root):
"""
Get software release from carrier XML.
:param root: ElementTree we're barking up.
:type root: xml.etree.ElementTree.ElementTree
"""
for child in root.iter("softwareReleaseMetadata"):
swver = child.get("softwareReleaseVersion")
return swver
def carrier_child_fileappend(child, files, baseurl, blitz=False):
"""
Append bar file links to a list from a child element.
:param child: Child element in use.
:type child: xml.etree.ElementTree.Element
:param files: Filelist.
:type files: list(str)
:param baseurl: Base URL, URL minus the filename.
:type baseurl: str
:param blitz: Whether or not to create a blitz package. False by default.
:type blitz: bool
"""
if not blitz:
files.append(baseurl + child.get("path"))
else:
if child.get("type") not in ["system:radio", "system:desktop", "system:os"]:
files.append(baseurl + child.get("path"))
return files
def carrier_child_finder(root, files, baseurl, blitz=False):
"""
Extract filenames, radio and OS from child elements.
:param root: ElementTree we're barking up.
:type root: xml.etree.ElementTree.ElementTree
:param files: Filelist.
:type files: list(str)
:param baseurl: Base URL, URL minus the filename.
:type baseurl: str
:param blitz: Whether or not to create a blitz package. False by default.
:type blitz: bool
"""
osver = radver = ""
for child in root.iter("package"):
files = carrier_child_fileappend(child, files, baseurl, blitz)
if child.get("type") == "system:radio":
radver = child.get("version")
elif child.get("type") == "system:desktop":
osver = child.get("version")
elif child.get("type") == "system:os":
osver = child.get("version")
return osver, radver, files
def parse_carrier_xml(data, blitz=False):
"""
Parse the response to a carrier update request and return the juicy bits.
:param data: The data to parse.
:type data: xml
:param blitz: Whether or not to create a blitz package. False by default.
:type blitz: bool
"""
root = ElementTree.fromstring(data)
sw_exists = root.find('./data/content/softwareReleaseMetadata')
swver = "N/A" if sw_exists is None else ""
if sw_exists is not None:
swver = carrier_swver_get(root)
files = []
package_exists = root.find('./data/content/fileSets/fileSet')
osver = radver = ""
if package_exists is not None:
baseurl = "{0}/".format(package_exists.get("url"))
osver, radver, files = carrier_child_finder(root, files, baseurl, blitz)
return swver, osver, radver, files
def prep_carrier_query(npc, device, upg, forced):
"""
Prepare carrier query XML.
:param npc: MCC + MNC (see `func:return_npc`)
:type npc: int
:param device: Hexadecimal hardware ID.
:type device: str
:param upg: "upgrade" or "repair".
:type upg: str
:param forced: Force a software release.
:type forced: str
"""
query = '<?xml version="1.0" encoding="UTF-8"?><updateDetailRequest version="2.2.1" authEchoTS="1366644680359"><clientProperties><hardware><pin>0x2FFFFFB3</pin><bsn>1128121361</bsn><imei>004401139269240</imei><id>0x{0}</id></hardware><network><homeNPC>0x{1}</homeNPC><iccid>89014104255505565333</iccid></network><software><currentLocale>en_US</currentLocale><legalLocale>en_US</legalLocale></software></clientProperties><updateDirectives><allowPatching type="REDBEND">true</allowPatching><upgradeMode>{2}</upgradeMode><provideDescriptions>false</provideDescriptions><provideFiles>true</provideFiles><queryType>NOTIFICATION_CHECK</queryType></updateDirectives><pollType>manual</pollType><resultPackageSetCriteria><softwareRelease softwareReleaseVersion="{3}" /><releaseIndependent><packageType operation="include">application</packageType></releaseIndependent></resultPackageSetCriteria></updateDetailRequest>'.format(device, npc, upg, forced)
return query
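# Hedged usage sketch (editor's addition, not part of the original module): how
# prep_carrier_query() and parse_carrier_xml() are meant to chain together. The
# endpoint URL, the use of `requests` and the forced release string below are
# illustrative assumptions only.
def _example_carrier_update_check(npc, device):
    """
    Illustrative only: build a carrier query, POST it and parse the reply.
    """
    import requests  # assumption: requests is available
    url = "https://cs.sl.blackberry.com/cse/updateDetails/2.2/"  # assumed endpoint
    body = prep_carrier_query(npc, device, upg="repair", forced="10.3.2.2639")
    reply = requests.post(url, headers={"Content-Type": "text/xml"}, data=body)
    swver, osver, radver, files = parse_carrier_xml(reply.text, blitz=False)
    return swver, osver, radver, files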
def prep_sr_lookup(osver):
"""
Prepare software lookup XML.
:param osver: OS version to lookup, 10.x.y.zzzz.
:type osver: str
"""
query = '<?xml version="1.0" encoding="UTF-8"?><srVersionLookupRequest version="2.0.0" authEchoTS="1366644680359"><clientProperties><hardware><pin>0x2FFFFFB3</pin><bsn>1140011878</bsn><imei>004402242176786</imei><id>0x8D00240A</id><isBootROMSecure>true</isBootROMSecure></hardware><network><vendorId>0x0</vendorId><homeNPC>0x60</homeNPC><currentNPC>0x60</currentNPC><ecid>0x1</ecid></network><software><currentLocale>en_US</currentLocale><legalLocale>en_US</legalLocale><osVersion>{0}</osVersion><omadmEnabled>false</omadmEnabled></software></clientProperties></srVersionLookupRequest>'.format(osver)
return query
def parse_sr_lookup(reqtext):
"""
Take the text of a software lookup request response and parse it as XML.
:param reqtext: Response text, hopefully XML formatted.
:type reqtext: str
"""
try:
root = ElementTree.fromstring(reqtext)
except ElementTree.ParseError:
packtext = "SR not in system"
else:
packtext = sr_lookup_extractor(root)
return packtext
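# Hedged sketch (editor's addition): the software-release lookup follows the same
# request/parse pattern as the carrier check above; the endpoint URL is an
# assumption for illustration.
def _example_sr_lookup(osver):
    """
    Illustrative only: look up the software release for an OS version.
    """
    import requests  # assumption: requests is available
    url = "https://cs.sl.blackberry.com/cse/srVersionLookup/2.0.0/"  # assumed endpoint
    reply = requests.post(url, headers={"Content-Type": "text/xml"}, data=prep_sr_lookup(osver))
    return parse_sr_lookup(reply.text)  # a version string, or "SR not in system"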
def sr_lookup_extractor(root):
"""
Take an ElementTree and extract a software release from it.
:param root: ElementTree we're barking up.
:type root: xml.etree.ElementTree.ElementTree
"""
reg = re.compile(r"(\d{1,4}\.)(\d{1,4}\.)(\d{1,4}\.)(\d{1,4})")
    packages = root.findall('./data/content/')
    packtext = "SR not in system"  # safe default when no package text matches
    for package in packages:
        if package.text is not None:
            match = reg.match(package.text)
            packtext = package.text if match else "SR not in system"
    return packtext
| 37.468599
| 946
| 0.687597
|
e224e2bb040df11488723a94a4fd7e9e94342479
| 3,190
|
py
|
Python
|
tests/test_api_empty_sandbox.py
|
QualiSystemsLab/Sandbox-API-Python
|
1e5027a4a4fe87a763379d21d1ce48393a6ad188
|
[
"MIT"
] | 3
|
2018-01-23T23:20:58.000Z
|
2022-01-10T21:54:16.000Z
|
tests/test_api_empty_sandbox.py
|
QualiSystemsLab/Sandbox-API-Python
|
1e5027a4a4fe87a763379d21d1ce48393a6ad188
|
[
"MIT"
] | null | null | null |
tests/test_api_empty_sandbox.py
|
QualiSystemsLab/Sandbox-API-Python
|
1e5027a4a4fe87a763379d21d1ce48393a6ad188
|
[
"MIT"
] | 3
|
2017-10-10T16:10:49.000Z
|
2018-03-16T19:00:15.000Z
|
"""
Test the api methods that require an empty, PUBLIC blueprint
"""
import common
import pytest
from cloudshell.sandbox_rest.sandbox_api import SandboxRestApiSession
@pytest.fixture(scope="module")
def blueprint_id(admin_session: SandboxRestApiSession, empty_blueprint):
res_id = common.get_blueprint_id_from_name(admin_session, empty_blueprint)
assert isinstance(res_id, str)
return res_id
@pytest.fixture(scope="module")
def sandbox_id(admin_session: SandboxRestApiSession, blueprint_id):
# start sandbox
start_res = admin_session.start_sandbox(blueprint_id=blueprint_id, sandbox_name="Pytest empty blueprint test")
sandbox_id = start_res["id"]
print(f"Sandbox started: {sandbox_id}")
yield sandbox_id
admin_session.stop_sandbox(sandbox_id)
print(f"\nSandbox ended: {sandbox_id}")
def test_start_stop(sandbox_id):
assert isinstance(sandbox_id, str)
print(f"Sandbox ID: {sandbox_id}")
def test_get_sandbox_details(admin_session, sandbox_id):
common.random_sleep()
details_res = admin_session.get_sandbox_details(sandbox_id)
assert isinstance(details_res, dict)
sb_name = details_res["name"]
print(f"Pulled details for sandbox '{sb_name}'")
def test_get_components(admin_session, sandbox_id):
common.random_sleep()
components_res = admin_session.get_sandbox_components(sandbox_id)
assert isinstance(components_res, list)
component_count = len(components_res)
print(f"component count found: {component_count}")
def test_get_sandbox_commands(admin_session, sandbox_id):
common.random_sleep()
commands_res = admin_session.get_sandbox_commands(sandbox_id)
assert isinstance(commands_res, list)
print(f"Sandbox commands: {[x['name'] for x in commands_res]}")
first_sb_command = admin_session.get_sandbox_command_details(sandbox_id, commands_res[0]["name"])
print(f"SB command name: {first_sb_command['name']}\n" f"description: {first_sb_command['description']}")
def test_get_sandbox_events(admin_session, sandbox_id):
common.random_sleep()
activity_res = admin_session.get_sandbox_activity(sandbox_id)
assert isinstance(activity_res, dict) and "events" in activity_res
events = activity_res["events"]
print(f"activity events count: {len(events)}")
def test_get_console_output(admin_session, sandbox_id):
common.random_sleep()
output_res = admin_session.get_sandbox_output(sandbox_id)
assert isinstance(output_res, dict) and "entries" in output_res
entries = output_res["entries"]
print(f"Sandbox output entries count: {len(entries)}")
def test_get_instructions(admin_session, sandbox_id):
common.random_sleep()
instructions_res = admin_session.get_sandbox_instructions(sandbox_id)
assert isinstance(instructions_res, str)
print(f"Pulled sandbox instructions: '{instructions_res}'")
def test_extend_sandbox(admin_session, sandbox_id):
common.random_sleep()
extend_response = admin_session.extend_sandbox(sandbox_id, "PT0H10M")
assert isinstance(extend_response, dict) and "remaining_time" in extend_response
print(f"extended sandbox. Remaining time: {extend_response['remaining_time']}")
| 36.666667
| 114
| 0.772727
|
fe2a9d2cac819b2d84d92a247f99bff73f444b12
| 245
|
py
|
Python
|
Radius.py
|
subho781/MCA-Python-Assignment
|
ebeb443ed72091659eacd30c0221ffb69b7df547
|
[
"MIT"
] | null | null | null |
Radius.py
|
subho781/MCA-Python-Assignment
|
ebeb443ed72091659eacd30c0221ffb69b7df547
|
[
"MIT"
] | null | null | null |
Radius.py
|
subho781/MCA-Python-Assignment
|
ebeb443ed72091659eacd30c0221ffb69b7df547
|
[
"MIT"
] | null | null | null |
# Program to calculate the area of a circle, taking the radius as user input
pi = 3.14 #initializing the value of pi
radius = float(input("Enter the radius: "))
area = pi * (radius**2) #calculating the area
print("The area of the circle is", area )
| 49
| 71
| 0.718367
|
67f9e64cbe0263be066f0a9b392e5304e4812d68
| 148
|
py
|
Python
|
libcloud_rest/api/versions.py
|
islamgulov/libcloud.rest
|
f76e3c013cf10d64e0b7e9090db4224405a1aeb8
|
[
"Apache-2.0"
] | 5
|
2015-03-20T16:43:26.000Z
|
2017-11-22T04:19:50.000Z
|
libcloud_rest/api/versions.py
|
coyo8/libcloud.rest
|
07e662eceeb18e6659dffb0d1073adbb0b960efe
|
[
"Apache-2.0"
] | 2
|
2018-02-16T03:18:29.000Z
|
2020-05-13T12:37:28.000Z
|
libcloud_rest/api/versions.py
|
coyo8/libcloud.rest
|
07e662eceeb18e6659dffb0d1073adbb0b960efe
|
[
"Apache-2.0"
] | 5
|
2015-11-04T08:24:15.000Z
|
2020-04-17T21:54:55.000Z
|
# -*- coding:utf-8 -*-
import libcloud
__all__ = [
'versions',
]
versions = {
libcloud.__version__: '0.1', # FIXME: it's just for test
}
| 13.454545
| 61
| 0.587838
|
18ae666d98fcdd105e07909b851d2f14807eede0
| 237
|
py
|
Python
|
api/serializers/user_serializer.py
|
eHattori/django-model-architecture
|
2f8a66f47c7000cda09f07d07709a5f235322b55
|
[
"MIT"
] | 1
|
2017-06-07T20:23:48.000Z
|
2017-06-07T20:23:48.000Z
|
api/serializers/user_serializer.py
|
eHattori/django-model-architecture
|
2f8a66f47c7000cda09f07d07709a5f235322b55
|
[
"MIT"
] | null | null | null |
api/serializers/user_serializer.py
|
eHattori/django-model-architecture
|
2f8a66f47c7000cda09f07d07709a5f235322b55
|
[
"MIT"
] | null | null | null |
from api.models.user import User
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name', 'other_name')
| 29.625
| 64
| 0.729958
|
773b133e5c8c955d77d9c3adfef9fd85a6fad001
| 61,115
|
py
|
Python
|
ludwig/data/preprocessing.py
|
Yard1/ludwig
|
510455f8d4fcd6b66e76d2c906d2c600fe724093
|
[
"Apache-2.0"
] | null | null | null |
ludwig/data/preprocessing.py
|
Yard1/ludwig
|
510455f8d4fcd6b66e76d2c906d2c600fe724093
|
[
"Apache-2.0"
] | null | null | null |
ludwig/data/preprocessing.py
|
Yard1/ludwig
|
510455f8d4fcd6b66e76d2c906d2c600fe724093
|
[
"Apache-2.0"
] | 1
|
2019-09-05T09:19:39.000Z
|
2019-09-05T09:19:39.000Z
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from ludwig.backend import LOCAL_BACKEND
from ludwig.constants import *
from ludwig.constants import TEXT
from ludwig.data.concatenate_datasets import concatenate_files, concatenate_df
from ludwig.data.dataset.base import Dataset
from ludwig.features.feature_registries import (base_type_registry,
input_type_registry)
from ludwig.features.feature_utils import compute_feature_hash
from ludwig.utils import data_utils
from ludwig.utils.data_utils import (CACHEABLE_FORMATS, CSV_FORMATS,
DATA_TRAIN_HDF5_FP,
DATAFRAME_FORMATS,
DICT_FORMATS, EXCEL_FORMATS,
FEATHER_FORMATS, FWF_FORMATS,
HDF5_FORMATS, HTML_FORMATS, JSON_FORMATS,
JSONL_FORMATS, ORC_FORMATS,
PARQUET_FORMATS, PICKLE_FORMATS,
SAS_FORMATS, SPSS_FORMATS, STATA_FORMATS,
TFRECORD_FORMATS, TSV_FORMATS, figure_data_format,
override_in_memory_flag, read_csv,
read_excel, read_feather, read_fwf,
read_html, read_json, read_jsonl,
read_orc, read_parquet, read_pickle,
read_sas, read_spss, read_stata, read_tsv,
split_dataset_ttv)
from ludwig.utils.data_utils import save_array, get_split_path
from ludwig.utils.defaults import (default_preprocessing_parameters,
default_random_seed)
from ludwig.utils.fs_utils import path_exists
from ludwig.utils.misc_utils import (get_from_registry, merge_dict,
resolve_pointers, set_random_seed,
get_proc_features_from_lists)
logger = logging.getLogger(__name__)
class DataFormatPreprocessor(ABC):
@staticmethod
@abstractmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
pass
@staticmethod
@abstractmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
pass
@staticmethod
@abstractmethod
def prepare_processed_data(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
pass
class DictPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
num_overrides = override_in_memory_flag(features, True)
if num_overrides > 0:
logger.warning(
'Using in_memory = False is not supported '
'with {} data format.'.format('dict')
)
df_engine = backend.df_engine
if dataset is not None:
dataset = df_engine.from_pandas(pd.DataFrame(dataset))
if training_set is not None:
training_set = df_engine.from_pandas(pd.DataFrame(training_set))
if validation_set is not None:
validation_set = df_engine.from_pandas(
pd.DataFrame(validation_set))
if test_set is not None:
test_set = df_engine.from_pandas(pd.DataFrame(test_set))
return _preprocess_df_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
training_set_metadata=training_set_metadata,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset, training_set_metadata = build_dataset(
pd.DataFrame(dataset),
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class DataFramePreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
num_overrides = override_in_memory_flag(features, True)
if num_overrides > 0:
logger.warning(
'Using in_memory = False is not supported '
'with {} data format.'.format('dataframe')
)
return _preprocess_df_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
training_set_metadata=training_set_metadata,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset, training_set_metadata = build_dataset(
dataset,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class CSVPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_csv,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_csv(dataset)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class TSVPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_tsv,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_tsv(dataset)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class JSONPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_json,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_json(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class JSONLPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_jsonl,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_jsonl(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class ExcelPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_excel,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_excel(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class ParquetPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_parquet,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_parquet(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
@staticmethod
def prepare_processed_data(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
test_set = test_set if test_set and path_exists(test_set) else None
validation_set = validation_set if validation_set and path_exists(validation_set) else None
return training_set, test_set, validation_set, training_set_metadata
class PicklePreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_pickle,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_pickle(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class FeatherPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_feather,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_feather(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class FWFPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_fwf,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_fwf(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class HTMLPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_html,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_html(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class ORCPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_orc,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_orc(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class SASPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_sas,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_sas(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class SPSSPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_spss,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_spss(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class StataPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return _preprocess_file_for_training(
features,
dataset,
training_set,
validation_set,
test_set,
read_fn=read_stata,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
dataset_df = read_stata(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
dataset, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend
)
return dataset, training_set_metadata, None
class HDF5Preprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return HDF5Preprocessor.prepare_processed_data(
features,
dataset,
training_set,
validation_set,
test_set,
training_set_metadata,
skip_save_processed_input,
preprocessing_params,
backend,
random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
hdf5_fp = dataset
dataset = load_hdf5(
dataset,
features,
split_data=False,
shuffle_training=False
)
return dataset, training_set_metadata, hdf5_fp
@staticmethod
def prepare_processed_data(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
if dataset is None and training_set is None:
raise ValueError(
'One of `dataset` or `training_set` must be not None')
not_none_set = dataset if dataset is not None else training_set
if not training_set_metadata:
raise ValueError('When providing HDF5 data, '
'training_set_metadata must not be None.')
logger.info('Using full hdf5 and json')
if DATA_TRAIN_HDF5_FP not in training_set_metadata:
logger.warning(
'data_train_hdf5_fp not present in training_set_metadata. '
'Adding it with the current HDF5 file path {}'.format(
not_none_set
)
)
training_set_metadata[DATA_TRAIN_HDF5_FP] = not_none_set
elif training_set_metadata[DATA_TRAIN_HDF5_FP] != not_none_set:
logger.warning(
'data_train_hdf5_fp in training_set_metadata is {}, '
'different from the current HDF5 file path {}. '
'Replacing it'.format(
training_set_metadata[DATA_TRAIN_HDF5_FP],
not_none_set
)
)
training_set_metadata[DATA_TRAIN_HDF5_FP] = not_none_set
if dataset is not None:
training_set, test_set, validation_set = load_hdf5(
dataset,
features,
shuffle_training=True
)
elif training_set is not None:
kwargs = dict(features=features, split_data=False)
training_set = load_hdf5(training_set,
shuffle_training=True,
**kwargs)
if validation_set is not None:
validation_set = load_hdf5(validation_set,
shuffle_training=False,
**kwargs)
if test_set is not None:
test_set = load_hdf5(test_set,
shuffle_training=False,
**kwargs)
return training_set, test_set, validation_set, training_set_metadata
class TFRecordPreprocessor(DataFormatPreprocessor):
@staticmethod
def preprocess_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
return TFRecordPreprocessor.prepare_processed_data(
features,
dataset,
training_set,
validation_set,
test_set,
training_set_metadata,
skip_save_processed_input,
preprocessing_params,
backend,
random_seed
)
@staticmethod
def preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
):
return dataset, training_set_metadata, None
@staticmethod
def prepare_processed_data(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
test_set = test_set if test_set and path_exists(test_set) else None
validation_set = validation_set if validation_set and path_exists(validation_set) else None
return training_set, test_set, validation_set, training_set_metadata
data_format_preprocessor_registry = {
**{fmt: DictPreprocessor for fmt in DICT_FORMATS},
**{fmt: DataFramePreprocessor for fmt in DATAFRAME_FORMATS},
**{fmt: CSVPreprocessor for fmt in CSV_FORMATS},
**{fmt: TSVPreprocessor for fmt in TSV_FORMATS},
**{fmt: JSONPreprocessor for fmt in JSON_FORMATS},
**{fmt: JSONLPreprocessor for fmt in JSONL_FORMATS},
**{fmt: ExcelPreprocessor for fmt in EXCEL_FORMATS},
**{fmt: ParquetPreprocessor for fmt in PARQUET_FORMATS},
**{fmt: PicklePreprocessor for fmt in PICKLE_FORMATS},
**{fmt: FWFPreprocessor for fmt in FWF_FORMATS},
    **{fmt: FeatherPreprocessor for fmt in FEATHER_FORMATS},
**{fmt: HTMLPreprocessor for fmt in HTML_FORMATS},
**{fmt: ORCPreprocessor for fmt in ORC_FORMATS},
**{fmt: SASPreprocessor for fmt in SAS_FORMATS},
**{fmt: SPSSPreprocessor for fmt in SPSS_FORMATS},
**{fmt: StataPreprocessor for fmt in STATA_FORMATS},
**{fmt: HDF5Preprocessor for fmt in HDF5_FORMATS},
**{fmt: TFRecordPreprocessor for fmt in TFRECORD_FORMATS},
}
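# Hedged illustration (editor's addition): the registry above is what
# get_from_registry() consults further down, so a data-format string resolves to
# its preprocessor class; 'csv' belonging to CSV_FORMATS is assumed here.
#
#     preprocessor_cls = get_from_registry('csv', data_format_preprocessor_registry)
#     # -> CSVPreprocessor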
def build_dataset(
dataset_df,
features,
global_preprocessing_parameters,
metadata=None,
backend=LOCAL_BACKEND,
random_seed=default_random_seed,
skip_save_processed_input=False
):
df_engine = backend.df_engine
dataset_df = df_engine.parallelize(dataset_df)
# If persisting DataFrames in memory is enabled, we want to do this after
# each batch of parallel ops in order to avoid redundant computation
dataset_df = backend.df_engine.persist(dataset_df)
global_preprocessing_parameters = merge_dict(
default_preprocessing_parameters,
global_preprocessing_parameters
)
# Get all the unique preprocessing features to compute
proc_features = []
feature_hashes = set()
for feature in features:
if PROC_COLUMN not in feature:
feature[PROC_COLUMN] = compute_feature_hash(feature)
if feature[PROC_COLUMN] not in feature_hashes:
proc_features.append(feature)
feature_hashes.add(feature[PROC_COLUMN])
dataset_cols = cast_columns(
dataset_df,
proc_features,
global_preprocessing_parameters,
backend
)
metadata = build_metadata(
metadata,
dataset_cols,
proc_features,
global_preprocessing_parameters,
backend
)
proc_cols = build_data(
dataset_cols,
proc_features,
metadata,
backend,
skip_save_processed_input
)
proc_cols[SPLIT] = get_split(
dataset_df,
force_split=global_preprocessing_parameters['force_split'],
split_probabilities=global_preprocessing_parameters[
'split_probabilities'
],
stratify=global_preprocessing_parameters['stratify'],
backend=backend,
random_seed=random_seed
)
dataset = backend.df_engine.df_like(dataset_df, proc_cols)
# At this point, there should be no missing values left in the dataframe, unless
# the DROP_ROW preprocessing option was selected, in which case we need to drop those
# rows.
dataset = dataset.dropna()
return dataset, metadata
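# Hedged usage sketch (editor's addition): a minimal, illustrative drive of
# build_dataset() with an in-memory pandas frame and two hand-written feature
# dicts. Real feature dicts come from a Ludwig config; the ones below, and the
# empty global preprocessing parameters (merged with the defaults inside
# build_dataset), are assumptions for illustration only.
def _example_build_dataset():
    df = pd.DataFrame({'x': [1.0, 2.0, 3.0, 4.0], 'y': [True, False, True, False]})
    features = [
        {NAME: 'x', COLUMN: 'x', TYPE: NUMERICAL},
        {NAME: 'y', COLUMN: 'y', TYPE: BINARY},
    ]
    proc_df, metadata = build_dataset(df, features, {}, backend=LOCAL_BACKEND)
    return proc_df, metadata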
def cast_columns(dataset_df, features, global_preprocessing_parameters,
backend):
# todo figure out if global_preprocessing_parameters is needed
dataset_cols = {}
for feature in features:
cast_column = get_from_registry(
feature[TYPE],
base_type_registry
).cast_column
# todo figure out if additional parameters are needed
# for the cast_column function
dataset_cols[feature[COLUMN]] = cast_column(
dataset_df[feature[COLUMN]],
backend
)
return dataset_cols
def build_metadata(
metadata, dataset_cols, features, global_preprocessing_parameters, backend
):
for feature in features:
if feature[NAME] in metadata:
continue
if PREPROCESSING in feature:
preprocessing_parameters = merge_dict(
global_preprocessing_parameters[feature[TYPE]],
feature[PREPROCESSING]
)
else:
preprocessing_parameters = global_preprocessing_parameters[
feature[TYPE]
]
# deal with encoders that have fixed preprocessing
if 'encoder' in feature:
encoders_registry = get_from_registry(
feature[TYPE],
input_type_registry
).encoder_registry
encoder_class = encoders_registry[feature['encoder']]
if hasattr(encoder_class, 'fixed_preprocessing_parameters'):
encoder_fpp = encoder_class.fixed_preprocessing_parameters
preprocessing_parameters = merge_dict(
preprocessing_parameters,
resolve_pointers(encoder_fpp, feature, 'feature.')
)
fill_value = precompute_fill_value(
dataset_cols,
feature,
preprocessing_parameters,
backend
)
if fill_value is not None:
preprocessing_parameters = {
'computed_fill_value': fill_value,
**preprocessing_parameters
}
handle_missing_values(
dataset_cols,
feature,
preprocessing_parameters
)
get_feature_meta = get_from_registry(
feature[TYPE],
base_type_registry
).get_feature_meta
column = dataset_cols[feature[COLUMN]]
if column.dtype == object:
column = column.astype(str)
metadata[feature[NAME]] = get_feature_meta(
column,
preprocessing_parameters,
backend
)
metadata[feature[NAME]][PREPROCESSING] = preprocessing_parameters
return metadata
def build_data(
input_cols,
features,
training_set_metadata,
backend,
skip_save_processed_input
):
proc_cols = {}
for feature in features:
preprocessing_parameters = \
training_set_metadata[feature[NAME]][
PREPROCESSING]
handle_missing_values(
input_cols,
feature,
preprocessing_parameters
)
add_feature_data = get_from_registry(
feature[TYPE],
base_type_registry
).add_feature_data
proc_cols = add_feature_data(
feature,
input_cols,
proc_cols,
training_set_metadata,
preprocessing_parameters,
backend,
skip_save_processed_input
)
return proc_cols
def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend):
missing_value_strategy = preprocessing_parameters['missing_value_strategy']
if missing_value_strategy == FILL_WITH_CONST:
return preprocessing_parameters['fill_value']
elif missing_value_strategy == FILL_WITH_MODE:
return dataset_cols[feature[COLUMN]].value_counts().index[0]
elif missing_value_strategy == FILL_WITH_MEAN:
if feature[TYPE] != NUMERICAL:
raise ValueError(
'Filling missing values with mean is supported '
'only for numerical types',
)
return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean())
# Otherwise, we cannot precompute the fill value for this dataset
return None
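# Worked illustration (editor's addition): for a column holding [1, 1, 2, NaN],
# FILL_WITH_MODE precomputes 1 (the most frequent value), FILL_WITH_MEAN precomputes
# (1 + 1 + 2) / 3 ~= 1.33 (numerical features only) and FILL_WITH_CONST just returns
# the configured fill_value; any other strategy returns None here and is resolved
# row-by-row in handle_missing_values() below.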
def handle_missing_values(dataset_cols, feature, preprocessing_parameters):
missing_value_strategy = preprocessing_parameters['missing_value_strategy']
# Check for the precomputed fill value in the metadata
computed_fill_value = preprocessing_parameters.get('computed_fill_value')
if computed_fill_value is not None:
dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].fillna(
computed_fill_value,
)
elif missing_value_strategy in [BACKFILL, BFILL, PAD, FFILL]:
dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].fillna(
method=missing_value_strategy,
)
elif missing_value_strategy == DROP_ROW:
# Here we only drop from this series, but after preprocessing we'll do a second
# round of dropping NA values from the entire output dataframe, which will
# result in the removal of the rows.
dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].dropna()
else:
raise ValueError('Invalid missing value strategy')
def get_split(
dataset_df,
force_split=False,
split_probabilities=(0.7, 0.1, 0.2),
stratify=None,
backend=LOCAL_BACKEND,
random_seed=default_random_seed,
):
if SPLIT in dataset_df and not force_split:
split = dataset_df[SPLIT]
else:
set_random_seed(random_seed)
if stratify is None or stratify not in dataset_df:
split = dataset_df.index.to_series().map(
lambda x: np.random.choice(3, 1, p=split_probabilities)
).astype(np.int8)
else:
split = np.zeros(len(dataset_df))
for val in dataset_df[stratify].unique():
# TODO dask: find a way to better parallelize this operation
idx_list = (
dataset_df.index[dataset_df[stratify] == val].tolist()
)
array_lib = backend.df_engine.array_lib
val_list = array_lib.random.choice(
3,
len(idx_list),
p=split_probabilities,
).astype(np.int8)
split[idx_list] = val_list
return split
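# Worked illustration (editor's addition): with the default probabilities
# (0.7, 0.1, 0.2), each row independently draws a split code of 0, 1 or 2, so
# roughly 70% of rows receive code 0, 10% code 1 and 20% code 2;
# split_dataset_ttv() later separates the frame on those codes, with 0/1/2
# conventionally meaning training/validation/test in Ludwig.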
def load_hdf5(
hdf5_file_path,
features,
split_data=True,
shuffle_training=False
):
# TODO dask: this needs to work with DataFrames
logger.info('Loading data from: {0}'.format(hdf5_file_path))
def shuffle(df):
return df.sample(frac=1).reset_index(drop=True)
dataset = data_utils.load_hdf5(hdf5_file_path)
if not split_data:
if shuffle_training:
dataset = shuffle(dataset)
return dataset
training_set, test_set, validation_set = split_dataset_ttv(dataset, SPLIT)
if shuffle_training:
training_set = shuffle(training_set)
return training_set, test_set, validation_set
def load_metadata(metadata_file_path):
logger.info('Loading metadata from: {0}'.format(metadata_file_path))
return data_utils.load_json(metadata_file_path)
def preprocess_for_training(
config,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
data_format=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
# sanity check to make sure some data source is provided
if dataset is None and training_set is None:
raise ValueError('No training data is provided!')
# determine data format if not provided or auto
if not data_format or data_format == 'auto':
data_format = figure_data_format(
dataset, training_set, validation_set, test_set
)
# if training_set_metadata is a string, assume it's a path to load the json
training_set_metadata = training_set_metadata or {}
if training_set_metadata and isinstance(training_set_metadata, str):
training_set_metadata = load_metadata(training_set_metadata)
# setup
features = (config['input_features'] +
config['output_features'])
# in case data_format is one of the cacheable formats,
# check if there's a cached hdf5 file with the same name,
# and in case move on with the hdf5 branch.
cached = False
cache = backend.cache.get_dataset_cache(
config, dataset, training_set, test_set, validation_set
)
if data_format in CACHEABLE_FORMATS:
cache_results = cache.get()
if cache_results is not None:
valid, *cache_values = cache_results
if valid:
logger.info(
'Found cached dataset and meta.json with the same filename '
'of the dataset, using them instead'
)
training_set_metadata, training_set, test_set, validation_set = cache_values
config['data_hdf5_fp'] = training_set
data_format = backend.cache.data_format
cached = True
dataset = None
else:
logger.info(
"Found cached dataset and meta.json with the same filename "
"of the dataset, but checksum don't match, "
"if saving of processed input is not skipped "
"they will be overridden"
)
cache.delete()
training_set_metadata[CHECKSUM] = cache.checksum
data_format_processor = get_from_registry(
data_format,
data_format_preprocessor_registry
)
if cached or data_format == 'hdf5':
# Always interpret hdf5 files as preprocessed, even if missing from the cache
processed = data_format_processor.prepare_processed_data(
features,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
training_set, test_set, validation_set, training_set_metadata = processed
else:
processed = data_format_processor.preprocess_for_training(
features,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
skip_save_processed_input=skip_save_processed_input,
preprocessing_params=preprocessing_params,
backend=backend,
random_seed=random_seed
)
training_set, test_set, validation_set, training_set_metadata = processed
replace_text_feature_level(
features,
[training_set, validation_set, test_set]
)
processed = (training_set, test_set, validation_set, training_set_metadata)
# cache the dataset
if backend.cache.can_cache(skip_save_processed_input):
processed = cache.put(*processed)
training_set, test_set, validation_set, training_set_metadata = processed
training_dataset = backend.dataset_manager.create(
training_set,
config,
training_set_metadata
)
validation_dataset = None
if validation_set is not None:
validation_dataset = backend.dataset_manager.create(
validation_set,
config,
training_set_metadata
)
test_dataset = None
if test_set is not None:
test_dataset = backend.dataset_manager.create(
test_set,
config,
training_set_metadata
)
return (
training_dataset,
validation_dataset,
test_dataset,
training_set_metadata
)
def _preprocess_file_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
read_fn=read_csv,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
"""
    Method to pre-process file-based (e.g. csv) data
    :param features: list of all features (input + output)
    :param dataset: path to the data
    :param training_set: training data
    :param validation_set: validation data
    :param test_set: test data
    :param training_set_metadata: train set metadata
    :param read_fn: function used to read the given file format into a dataframe
    :param skip_save_processed_input: if False, the pre-processed data is saved
    as .hdf5 files in the same location as the input files with the same names.
    :param preprocessing_params: preprocessing parameters
    :param backend: backend used for data processing
    :param random_seed: random seed
    :return: training, test, validation datasets, training metadata
"""
if dataset:
# Use data and ignore _train, _validation and _test.
# Also ignore data and train set metadata needs preprocessing
logger.info(
            'Using full raw dataset, no hdf5 and json files '
            'with the same name have been found'
)
logger.info('Building dataset (it may take a while)')
dataset_df = read_fn(dataset, backend.df_engine.df_lib)
training_set_metadata[SRC] = dataset
data, training_set_metadata = build_dataset(
dataset_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend,
random_seed=random_seed,
skip_save_processed_input=skip_save_processed_input
)
if backend.is_coordinator() and not skip_save_processed_input:
# save split values for use by visualization routines
split_fp = get_split_path(dataset)
save_array(split_fp, data[SPLIT])
# TODO dask: https://docs.dask.org/en/latest/dataframe-api.html#dask.dataframe.DataFrame.random_split
training_data, test_data, validation_data = split_dataset_ttv(
data,
SPLIT
)
elif training_set:
# use data_train (including _validation and _test if they are present)
# and ignore data and train set metadata
# needs preprocessing
logger.info(
            'Using training raw csv, no hdf5 and json '
            'files with the same name have been found'
)
logger.info('Building dataset (it may take a while)')
concatenated_df = concatenate_files(
training_set,
validation_set,
test_set,
read_fn,
backend
)
training_set_metadata[SRC] = training_set
data, training_set_metadata = build_dataset(
concatenated_df,
features,
preprocessing_params,
metadata=training_set_metadata,
backend=backend,
random_seed=random_seed
)
training_data, test_data, validation_data = split_dataset_ttv(
data,
SPLIT
)
else:
        raise ValueError('One of `dataset` or `training_set` must be not None')
return training_data, test_data, validation_data, training_set_metadata
def _preprocess_df_for_training(
features,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
preprocessing_params=default_preprocessing_parameters,
backend=LOCAL_BACKEND,
random_seed=default_random_seed
):
""" Method to pre-process dataframes. This doesn't have the option to save the
processed data as hdf5 as we don't expect users to do this as the data can
be processed in memory
"""
if dataset is not None:
# needs preprocessing
logger.info('Using full dataframe')
logger.info('Building dataset (it may take a while)')
elif training_set is not None:
# needs preprocessing
logger.info('Using training dataframe')
logger.info('Building dataset (it may take a while)')
dataset = concatenate_df(
training_set,
validation_set,
test_set,
backend
)
dataset, training_set_metadata = build_dataset(
dataset,
features,
preprocessing_params,
metadata=training_set_metadata,
random_seed=random_seed,
backend=backend
)
training_set, test_set, validation_set = split_dataset_ttv(
dataset,
SPLIT
)
return training_set, test_set, validation_set, training_set_metadata
def preprocess_for_prediction(
config,
dataset,
training_set_metadata=None,
data_format=None,
split=FULL,
include_outputs=True,
backend=LOCAL_BACKEND
):
"""Preprocesses the dataset to parse it into a format that is usable by the
Ludwig core
    :param config: The model configuration describing input and output features
    :param dataset: The input data to preprocess
    :param training_set_metadata: Train set metadata for the input features
    :param data_format: Format of the input data; inferred when None or 'auto'
    :param split: the split of dataset to return
    :param include_outputs: whether output features should also be preprocessed
    :param backend: backend used for data processing
    :returns: Dataset, Train set metadata
"""
# Sanity Check to make sure some data source is provided
if dataset is None:
raise ValueError('No training data is provided!')
if isinstance(dataset, Dataset):
return dataset, training_set_metadata
# determine data format if not provided or auto
if not data_format or data_format == 'auto':
data_format = figure_data_format(dataset)
# manage the in_memory parameter
if data_format not in HDF5_FORMATS:
num_overrides = override_in_memory_flag(
config['input_features'],
True
)
if num_overrides > 0:
logger.warning(
'Using in_memory = False is not supported '
'with {} data format.'.format(data_format)
)
preprocessing_params = merge_dict(
default_preprocessing_parameters,
config[PREPROCESSING]
)
# if training_set_metadata is a string, assume it's a path to load the json
if training_set_metadata and isinstance(training_set_metadata, str):
training_set_metadata = load_metadata(training_set_metadata)
# setup
output_features = []
if include_outputs:
output_features += config['output_features']
features = config['input_features'] + output_features
# Check the cache for an already preprocessed dataset. This only
# applies to scenarios where the user wishes to predict on a split
# of the full dataset, where we preprocess the whole dataset together
# during training. If the user wishes to predict on the full dataset,
# it is assumed they are predicting on unseen data. This is done
# because the cached data is stored in its split form, and would be
# expensive to recombine, requiring further caching.
cached = False
cache = backend.cache.get_dataset_cache(config, dataset)
training_set = test_set = validation_set = None
if data_format in CACHEABLE_FORMATS and split != FULL:
cache_results = cache.get()
if cache_results is not None:
valid, *cache_values = cache_results
if valid:
logger.info(
'Found cached dataset and meta.json with the same filename '
'of the input file, using them instead'
)
training_set_metadata, training_set, test_set, validation_set = cache_values
config['data_hdf5_fp'] = training_set
data_format = backend.cache.data_format
cached = True
data_format_processor = get_from_registry(
data_format,
data_format_preprocessor_registry
)
if cached:
processed = data_format_processor.prepare_processed_data(
features,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
preprocessing_params=preprocessing_params,
backend=backend,
)
training_set, test_set, validation_set, training_set_metadata = processed
else:
processed = data_format_processor.preprocess_for_prediction(
dataset,
features,
preprocessing_params,
training_set_metadata,
backend
)
dataset, training_set_metadata, new_hdf5_fp = processed
training_set_metadata = training_set_metadata.copy()
if new_hdf5_fp:
training_set_metadata[DATA_TRAIN_HDF5_FP] = new_hdf5_fp
replace_text_feature_level(features, [dataset])
if split != FULL:
training_set, test_set, validation_set = split_dataset_ttv(
dataset,
SPLIT
)
if split == TRAINING:
dataset = training_set
elif split == VALIDATION:
dataset = validation_set
elif split == TEST:
dataset = test_set
config = {
**config,
'output_features': output_features,
}
dataset = backend.dataset_manager.create_inference_dataset(
dataset,
split,
config,
training_set_metadata,
)
return dataset, training_set_metadata
def replace_text_feature_level(features, datasets):
for feature in features:
if feature[TYPE] == TEXT:
for dataset in datasets:
if dataset is not None:
dataset[feature[PROC_COLUMN]] = dataset[
'{}_{}'.format(
feature[PROC_COLUMN],
feature['level']
)
]
for level in ('word', 'char'):
name_level = '{}_{}'.format(
feature[PROC_COLUMN],
level)
if name_level in dataset:
del dataset[name_level]
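# Worked illustration (editor's addition): for a text feature whose proc column is
# 'desc_abc123' (a made-up hash) and whose level is 'word', the loop above copies
# dataset['desc_abc123_word'] into dataset['desc_abc123'] and then deletes
# 'desc_abc123_word' and 'desc_abc123_char' where present.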
| 33.106717
| 109
| 0.616264
|
b2f4aad9bb9dad1954e58fdb5773e0cdacd496d5
| 16,819
|
py
|
Python
|
ms_deisotope/peak_dependency_network/intervals.py
|
mstim/ms_deisotope
|
29f4f466e92e66b65a2d21eca714aa627caa21db
|
[
"Apache-2.0"
] | 18
|
2017-09-01T12:26:12.000Z
|
2022-02-23T02:31:29.000Z
|
ms_deisotope/peak_dependency_network/intervals.py
|
mstim/ms_deisotope
|
29f4f466e92e66b65a2d21eca714aa627caa21db
|
[
"Apache-2.0"
] | 19
|
2017-03-12T20:40:36.000Z
|
2022-03-31T22:50:47.000Z
|
ms_deisotope/peak_dependency_network/intervals.py
|
mstim/ms_deisotope
|
29f4f466e92e66b65a2d21eca714aa627caa21db
|
[
"Apache-2.0"
] | 14
|
2016-05-06T02:25:30.000Z
|
2022-03-31T14:40:06.000Z
|
from collections import deque
class SpanningMixin(object):
"""Provides methods for checking whether an entity
which has a defined start and end point over a single
dimension contains or overlaps with another entity in
that same dimension.
"""
def __init__(self, start, end):
self.start = start
self.end = end
def __contains__(self, i):
"""Tests for point inclusion, `start <= i <= end`
Parameters
----------
i : Number
The point to be tested
Returns
-------
bool
"""
return self.contains(i)
def contains(self, i):
"""Tests for point inclusion, `start <= i <= end`
Parameters
----------
i : Number
The point to be tested
Returns
-------
bool
"""
return self.start <= i <= self.end
def overlaps(self, interval):
"""Tests whether another spanning entity with
a defined start and end point overlaps with
this spanning entity
Parameters
----------
interval : SpanningMixin
Returns
-------
bool
"""
        cond = ((self.start <= interval.start and self.end >= interval.start) or (
            self.start >= interval.start and self.end <= interval.end) or (
            self.start >= interval.start and self.end >= interval.end and self.start <= interval.end) or (
            self.start <= interval.end and self.end >= interval.end))
return cond
def overlap_size(self, interval):
if interval.start in self and interval.end in self:
return interval.end - interval.start
elif self.start in interval and self.end in interval:
return self.end - self.start
if self.end >= interval.start:
return self.end - interval.start
elif self.start >= interval.end:
return self.start - interval.end
def is_contained_in_interval(self, interval):
"""Tests whether this spanning entity is
completely contained inside the other entity
Parameters
----------
interval : SpanningMixin
Returns
-------
bool
"""
return self.start >= interval.start and self.end <= interval.end
def contains_interval(self, interval):
"""Tests whether the other spanning entity is
completely contained inside this entity
Parameters
----------
interval : SpanningMixin
Returns
-------
bool
"""
return self.start <= interval.start and self.end >= interval.end
class Interval(SpanningMixin):
"""A generic wrapper around data associated
with a particular interval over a single dimension
Attributes
----------
data : dict
A holder of arbitrary extra data not associated
with any single member contained in this interval
end : Number
The end point of the interval described
members : list
A list of arbitrary objects which are associated with this
interval.
start : Number
The start point of the interval described
"""
def __init__(self, start, end, members=None, **kwargs):
if members is None:
members = []
self.start = start
self.end = end
self.members = members
self.data = kwargs
def __repr__(self):
return "%s(start=%r, end=%r, data=%r)" % (self.__class__.__name__, self.start, self.end, self.data)
def __getitem__(self, k):
return self.members[k]
def __len__(self):
return len(self.members)
def __iter__(self):
return iter(self.members)
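# Hedged usage sketch (editor's addition): exercising the SpanningMixin predicates
# through the Interval wrapper defined above; the numbers are arbitrary.
def _example_interval_queries():
    a = Interval(2.0, 7.0, members=['peak-1'])
    b = Interval(5.0, 9.0)
    assert 3.5 in a                                   # SpanningMixin.__contains__
    assert a.overlaps(b) and b.overlaps(a)            # the spans share [5.0, 7.0]
    assert a.overlap_size(b) == 2.0                   # 7.0 - 5.0
    assert not a.contains_interval(b)                 # b ends past a
    assert not a.is_contained_in_interval(b)          # a starts before b
    return a, b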
class IntervalTreeNode(object):
"""A single node in an Interval Tree. The
root node of an interval tree can be treated
as the tree itself.
Attributes
----------
center : Number
The center point of this node's collection
contained : list
The list of Interval-like objects which
were aggregated under this node. This collection
of intervals all span this node's center.
end : Number
        The end point of this node's collection or its
rightmost child's end point, whichever is larger
left : IntervalTreeNode
The left child node of this node. May be `None`
if this node is a leaf node. Contains all
intervals whose end point is less than this
node's center
level : int
Depth in the tree
    parent : IntervalTreeNode
This node's parent node. May be None if this
node is the root node.
    right : IntervalTreeNode
The right child node of this node. May be `None`
if this node is a leaf node. Contains all
intervals whose start point is greater than this
node's center
start : Number
        The start point of this node's collection or its
leftmost child's start point, whichever is smaller
"""
def __init__(self, center, left, contained, right, level=0, parent=None):
self.center = center
self.left = left
self.contained = contained
self.right = right
self.level = level
self.parent = parent
start = float('inf')
end = -float('inf')
i = 0
for interval in self.contained:
i += 1
if interval.start < start:
start = interval.start
if interval.end > end:
end = interval.end
if i > 0:
self.start = start
self.end = end
else:
self.start = self.end = center
def contains_point(self, x):
"""Returns the list of contained intervals for all
nodes which contain the point `x`.
Parameters
----------
x : Number
The query point
Returns
-------
list
A list of objects which span `x` from all spanning
nodes' `contained` list.
"""
inner = [
i for i in self.contained
if i.start <= x <= i.end
]
if self.left is not None and self.left.start <= x <= self.left.end:
return self.left.contains_point(x) + inner
if self.right is not None and self.right.start <= x <= self.right.end:
return self.right.contains_point(x) + inner
else:
return inner
def _overlaps_interval(self, start, end):
result = []
query = Interval(start, end)
for i in self.contained:
if query.overlaps(i):
result.append(i)
return result
def overlaps(self, start, end):
"""Returns the list of all contained intervals which
overlap with the interval described by `start` and `end`
Parameters
----------
start : Number
The start of the query interval
end : Number
The end of the query interval
Returns
-------
list
A list of all objects which overlap the argument interval
from all spanning nodes' `contained` list.
"""
        result = []
        # The query lies entirely outside this node's span: nothing can match.
        if start > self.end or end < self.start:
            return result
        # Descend left whenever the query could reach intervals stored there.
        if self.left is not None and (start <= self.start or self.left.end >= start):
            result.extend(self.left.overlaps(start, end))
        result.extend(self._overlaps_interval(start, end))
        # Descend right only when the query reaches the right subtree's span.
        if self.right is not None and end >= self.right.start:
            result.extend(self.right.overlaps(start, end))
        return result
def node_contains(self, point):
return self.start <= point <= self.end
def __repr__(self):
return "IntervalTreeNode(level=%d, center=%0.4f, start=%0.4f, end=%0.4f)" % (
self.level, self.center, self.start, self.end)
def __eq__(self, other):
if other is None:
return False
else:
result = self.contained == other.contained
if result:
result = self.left == other.left
if result:
result = self.right == other.right
return result
def __hash__(self):
return hash((self.start, self.center, self.right, self.level))
def __diagnostic_eq__(self, other): # pragma: no cover
if other is None:
return False
else:
result = self.contained == other.contained
if result:
try:
result = self.left.__diagnostic_eq__(other.left)
except AttributeError:
result = self.left == other.left
if result:
try:
result = self.right.__diagnostic_eq__(other.right)
except AttributeError:
result = self.right == other.right
if not result:
print(self.right, "r!=", other.right)
else:
print(self.left, "l!=", other.left)
else:
print(self, "!=", other)
return result
def _update_bounds(self):
changed = False
if self.left is not None and self.left.start < self.start:
changed = True
self.start = self.left.start
if self.right is not None and self.right.end > self.end:
changed = True
self.end = self.right.end
if self.parent is not None:
self.parent._update_bounds()
return changed
def insert(self, interval):
insert_in_self = False
if self.node_contains(interval.start):
if self.left is not None and self.left.node_contains(interval.end):
return self.left.insert(interval)
elif (self.right is not None and self.right.node_contains(interval.end) and
self.right.node_contains(interval.start)):
return self.right.insert(interval)
else:
insert_in_self = True
elif self.node_contains(interval.end):
if self.right is not None and self.right.node_contains(interval.start):
return self.right.insert(interval)
else:
insert_in_self = True
if not insert_in_self and self.parent is None:
insert_in_self = True
if insert_in_self:
self.contained.append(interval)
changed = False
if interval.start < self.start:
self.start = interval.start
changed = True
if interval.end > self.end:
self.end = interval.end
changed = True
if changed:
self._update_bounds()
def flatten(self):
# perform an infix traversal of the tree and collect
# the elements
items = []
stack = deque([])
current = self
done = False
while not done:
if current is not None:
stack.append(current)
current = current.left
else:
if stack:
current = stack.pop()
items.extend(current.contained)
current = current.right
else:
done = True
return items
def balance(self):
items = self.flatten()
tree = self.build(items)
self.left = tree.left
self.right = tree.right
self.contained = tree.contained
self.start = tree.start
self.end = tree.end
self.center = tree.center
        if self.left is not None:
            self.left.parent = self
        if self.right is not None:
            self.right.parent = self
self._update_bounds()
def iterative_build_interval_tree(cls, intervals):
"""Builds an IntervalTreeNode hierarchy from `intervals`. This
iterative method is preferable to avoid recursion limits.
Parameters
----------
intervals : Iterable of Intervals
The set of Interval-like objects to use to construct
the Interval Tree
Returns
-------
IntervalTreeNode
The root of the constructed Interval Tree
"""
stack = []
root = cls(0, None, [], None, -1)
if not intervals:
return root
stack.append((root, intervals, "left"))
while stack:
parent, members, side = stack.pop()
if len(members) > 0:
centers = [
float(i.start + i.end) / 2. for i in members
]
left = []
right = []
contained = []
center = sum(centers) / (len(centers))
if len(members) > 20:
for i in members:
if abs(i.start - center) < 1e-6 and abs(i.end - center) < 1e-6:
contained.append(i)
elif center > i.end:
left.append(i)
elif center < i.start:
right.append(i)
else:
contained.append(i)
else:
contained = members[:]
if len(right) == len(members) or len(left) == len(members):
contained = members[:]
left = []
right = []
node = cls(center, left=None, contained=contained, right=None, level=parent.level + 1, parent=parent)
if side == 'left':
parent.left = node
parent.start = min(node.start, parent.start)
up = parent.parent
while up is not None:
if up.start > node.start:
up.start = node.start
up = up.parent
elif side == 'right':
parent.right = node
parent.end = max(node.end, parent.end)
up = parent.parent
while up is not None:
if up.end < node.end:
up.end = node.end
up = up.parent
else:
raise ValueError(side)
stack.append((node, left, "left"))
stack.append((node, right, "right"))
return root.left
IntervalTreeNode.build = classmethod(iterative_build_interval_tree)
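# Illustrative sketch: building a tree from Interval objects and querying it.
# The interval bounds are arbitrary example values.
#
#     >>> tree = IntervalTreeNode.build([Interval(0, 4), Interval(3, 9), Interval(10, 12)])
#     >>> sorted((i.start, i.end) for i in tree.contains_point(3.5))
#     [(0, 4), (3, 9)]
#     >>> sorted((i.start, i.end) for i in tree.overlaps(8, 11))
#     [(3, 9), (10, 12)]
#     >>> tree.insert(Interval(11, 15))
#     >>> tree.balance()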
def recursive_build_interval_tree(cls, intervals, level=0): # pragma: no cover
if len(intervals) > 0:
centers = [
float(i.start + i.end) / 2. for i in intervals
]
left = []
right = []
contained = []
center = sum(centers) / (len(centers) + 1.)
if len(intervals) > 20:
for i in intervals:
if center >= i.end:
left.append(i)
elif center < i.start:
right.append(i)
else:
contained.append(i)
left = cls.recursive_build_interval_tree(left, level + 1)
right = cls.recursive_build_interval_tree(right, level + 1)
else:
contained = intervals
left = None
right = None
inst = cls(center, left, contained, right, level)
if left is not None:
inst.left.parent = inst
inst.start = min(inst.start, left.start)
if right is not None:
inst.right.parent = inst
            inst.end = max(inst.end, right.end)
return inst
else:
return None
IntervalTreeNode.recursive_build_interval_tree = classmethod(
recursive_build_interval_tree)
try:
has_c = True
_SpanningMixin = SpanningMixin
_Interval = Interval
_IntervalTreeNode = IntervalTreeNode
from ms_deisotope._c.peak_dependency_network.intervals import (
SpanningMixin, Interval, IntervalTreeNode)
except ImportError:
has_c = False
| 32.220307 | 113 | 0.542006 |